X-Git-Url: https://pd.if.org/git/?p=zos;a=blobdiff_plain;f=mem.h;fp=mem.h;h=9d5630443d5de5d8f642a8441e0661826f16dd60;hp=0000000000000000000000000000000000000000;hb=75986ab9ae7e4b5b0e397559c16710ac014ecfa8;hpb=3afdcc11364fbed161b57409f90fe572fc789047

diff --git a/mem.h b/mem.h
new file mode 100644
index 0000000..9d56304
--- /dev/null
+++ b/mem.h
@@ -0,0 +1,328 @@
+#ifndef MEM_H_
+#define MEM_H_ 1
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include "multiboot.h"
+
+typedef uintptr_t paddr_t;
+typedef uintptr_t vaddr_t;
+
+extern uintptr_t _kernel_end;
+extern uintptr_t _kernel_vma;
+extern uintptr_t _kernel_phys_end;
+extern uintptr_t _kernel_size;
+extern uintptr_t _asm_physmap;
+
+extern void *bios_ebda;
+
+/* These are in memx64.s */
+extern void flush_tlb(void *va);
+extern void switch_address_space(uintptr_t cr3);
+
+void vmem_test();
+
+/* get the linear-map virtual address of a physical page */
+#define PHY2VIRTP(pa) ( (void *)(((paddr_t)(pa)) + memory.physmap) )
+/* only valid for addresses inside the linear mapped physical memory */
+#define VIRTP2PHY(va) ( (paddr_t)(((vaddr_t)(va)) - memory.physmap) )
+#define KB(x) ((x) * (1<<10))
+#define MB(x) ((x) * (1<<20))
+#define GB(x) (((uint64_t)(x)) * (1ULL<<30))
+#define TB(x) (((uint64_t)(x)) * (1ULL<<40))
+uintptr_t mem_align(uintptr_t a, size_t alignment);
+
+/*
+ * kernel is at 0xFFFFFFFF80000000 and up
+ * we will map in all physical memory below that
+ */
+
+/* structure of physical frames */
+struct frame {
+	struct frame *next;
+	void *paddr; /* the physical address of the frame */
+	struct frame *prev;
+	void *top, *base;
+	uint64_t magic;
+};
+
+/* a map of memory */
+/* TODO fix these types. maybe add a physvend */
+struct memory {
+	void *kbase; /* at 0xFFFFFFFF00000000 */
+	void *kstack; /* grow down from -1 ? */
+	uintptr_t kend; /* virtual address of top of kernel */
+	uintptr_t kphysend; /* physical address of top of kernel */
+	uintptr_t phys_max; /* maximum (usable) physical address + 1 */
+	uintptr_t physmap; /* all physical memory is mapped starting here */
+	/* kbase + kernel size?, then put dynamic kernel structures above that,
+	 * growing toward the stack?
+	 */
+};
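+
+/*
+ * Illustrative sketch, not part of the project's actual code: intended use
+ * of the linear physical-map macros above.  It assumes the linear mapping
+ * at memory.physmap has already been set up; the address and value written
+ * are made up.
+ */
+#if 0
+static void physmap_example_sketch(void)
+{
+	paddr_t pa = MB(2);		/* some physical address (made up) */
+	uint32_t *p = PHY2VIRTP(pa);	/* its address inside the linear map */
+	*p = 0xdeadbeef;		/* physical memory reached through that window */
+	paddr_t back = VIRTP2PHY(p);	/* back == pa */
+	(void)back;
+}
+#endif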
+
+/* TODO keep track of the kernel virtual address of the page, we could
+ * re-alloc it in the same spot
+ */
+struct pstack {
+	paddr_t page_addr[510]; /* physical addresses of frames */
+	size_t pages; /* free pages in this stack frame */
+	struct pstack *next; /* address of next stack frame */
+};
+
+/*
+ * free a page:
+ *  if there is no room in the current stack frame, then
+ *   set phys.stack->next to the new page and
+ *   map the new page in at phys.stack
+ *  then add the physical address to the stack's page_addr[]
+ *  and increment phys.stack->pages
+ *
+ * get a page:
+ *  if there are no pages free on the stack:
+ *   map phys.stack->next in at phys.stack and
+ *   hand out the old page, which is on pages[free]
+ *
+ * it's probably worth memoizing the virtual address
+ * of the exact place where the stack vm map page is stored
+ * in the page tables
+ */
+
+/* physical memory manager */
+struct phys {
+	struct frame *base;
+	struct frame *top;
+	size_t frame_size;
+	int64_t offset; /* paddr = vaddr + offset */
+	struct phys *next;
+
+	vaddr_t kmap; /* physical address of page table entry for *stack */
+	vaddr_t *page_map; /* temporary page map space */
+	struct pstack *stack; /* virtual address of top of page stack */
+	size_t free; /* total number of free pages */
+};
+/*
+ * pa = va + pb - vb
+ * va = pa + vb - pb
+ */
+
+extern struct memory memory;
+extern struct phys phys;
+
+/*
+ * ops:
+ * add - add a contiguous chunk of physical memory and a virtual address base.
+ * alloc - get a page of memory
+ * free - free a page of memory
+ * tinyalloc - get a really small page, 256 bytes
+ * tinyfree -
+ */
+
+/* TODO lazy setup of the memory block stack */
+/* TODO modify phys_init to be "more core" to support
+ * discontiguous memory addresses
+ */
+
+/* These don't have to be the same as the hardware paging, but
+ * it probably helps
+ */
+/* Hmm, need to map in memory for this to work */
+
+uint64_t getcr3(); /* probably in assembly */
+
+paddr_t create_addrspace(void);
+
+void test_address(void *);
+void phys_init(struct multiboot_info *mbi);
+void mem_init();
+paddr_t palloc();
+void pfree(paddr_t vmem);
+void *koalloc(size_t size);
+void kofree(void *, size_t size);
+void dumpostacks(int f, int t);
+
+uint64_t makecr3(void *pml4);
+void *virt2phys(void *vaddr);
+
+extern uint64_t kernel_space; /* physical address of kernel space? */
+#define MEM_KERNEL (kernel_space)
+#define KERNEL_PML4 ((struct pml4t *)PHY2VIRTP(kernel_space))
+
+extern paddr_t pci_start;
+
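+/*
+ * Illustrative sketch, not the project's actual palloc()/pfree(): one
+ * possible shape of the page-stack push/pop described in the comment above
+ * struct phys.  The helper names are made up, and the remapping of the
+ * stack frame itself (per that comment) is only noted, not implemented.
+ */
+#if 0
+static paddr_t pstack_pop_sketch(struct phys *p)
+{
+	struct pstack *s = p->stack;
+
+	if (s->pages == 0) {
+		/* per the comment above: map s->next in at phys.stack and
+		 * hand out the physical page the old frame occupied */
+		return 0;
+	}
+	p->free--;
+	return s->page_addr[--s->pages];
+}
+
+static void pstack_push_sketch(struct phys *p, paddr_t pa)
+{
+	struct pstack *s = p->stack;
+
+	if (s->pages == 510) {
+		/* per the comment above: the freed page becomes the new
+		 * top-of-stack frame, linked in via next */
+		return;
+	}
+	s->page_addr[s->pages++] = pa;
+	p->free++;
+}
+#endif
+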
+struct cr3 {
+	uint64_t reslow:3; /* should be cleared to zero */
+	uint64_t pwt:1; /* page writethrough */
+	uint64_t pcd:1; /* page cache disable */
+	uint64_t resmed:7; /* clear to zero */
+	uint64_t addr:40; /* pml4 table base physical address shifted right 12 */
+	uint64_t reshigh:12; /* clear to zero */
+};
+
+/* TODO last bit (63) is actually a no-execute bit */
+struct pml4e {
+	uint64_t present:1;
+	uint64_t rw:1;
+	uint64_t user:1;
+	uint64_t pwt:1;
+	uint64_t pcd:1;
+	uint64_t a:1;
+	uint64_t ign:1;
+	uint64_t mbz:2;
+	uint64_t avl:3;
+	uint64_t addr:40; /* pdp table base physical address shifted right 12 */
+	uint64_t reshigh:12; /* clear to zero */
+};
+
+struct pdpe {
+	uint64_t present:1;
+	uint64_t rw:1;
+	uint64_t user:1;
+	uint64_t pwt:1;
+	uint64_t pcd:1;
+	uint64_t a:1;
+	uint64_t ign:1;
+	uint64_t ps:1; /* PS bit: 0 references a page directory, 1 maps a 1 GB page */
+	uint64_t mbz:1;
+	uint64_t avl:3;
+	uint64_t addr:40; /* page directory (or 1 GB page) base physical address shifted right 12 */
+	uint64_t reshigh:12; /* clear to zero */
+};
+
+struct pde {
+	uint64_t present:1;
+	uint64_t rw:1;
+	uint64_t user:1;
+	uint64_t pwt:1;
+	uint64_t pcd:1;
+	uint64_t a:1;
+	uint64_t ign:1;
+	uint64_t ps:1; /* PS bit: 0 references a page table (4 KB pages), 1 maps a 2 MB page */
+	uint64_t mbz:1;
+	uint64_t avl:3;
+	uint64_t addr:40; /* page table (or 2 MB page) base physical address shifted right 12 */
+	uint64_t reshigh:12; /* clear to zero */
+};
+
+struct pte {
+	uint64_t present:1;
+	uint64_t rw:1;
+	uint64_t user:1;
+	uint64_t pwt:1;
+	uint64_t pcd:1;
+	uint64_t a:1;
+	uint64_t d:1;
+	uint64_t pat:1;
+	uint64_t g:1;
+	uint64_t avl:3;
+	uint64_t addr:40; /* 4 KB page base physical address shifted right 12 */
+	uint64_t reshigh:12; /* clear to zero */
+};
+
+struct pml4t {
+	struct pml4e pml4e[512];
+};
+
+struct pdpt {
+	struct pdpe pdpe[512];
+};
+
+struct pdt {
+	struct pde pde[512];
+};
+
+struct pt {
+	struct pte pte[512];
+};
+
+struct vaddr_decode {
+	/* broken down virtual address */
+	int pml4offset, pdpoffset, pdoffset, ptoffset;
+	paddr_t offset; /* physical page offset */
+
+	struct pml4t *pml4t; /* pml4 base address */
+	paddr_t pml4_phys; /* pml4 physical base address */
+
+	struct pml4e pml4e; /* entry in page map level 4 */
+	/* at pml4 + 8 * pml4offset */
+
+	struct pdpe pdpe; /* entry in page directory pointer table */
+	/* at virt(pml4e.addr) + 8 * pdpoffset */
+	/* does not exist unless pml4e.present */
+	/* pdpe.ps == 1 means 1 GB pages, and the next level doesn't exist:
+	 * offset = vaddr & 0x3fffffff; i.e. low 30 bits
+	 * physical page address = pdpe.addr << 12 with the offset bits masked off
+	 */
+
+	struct pde pde; /* entry in page directory table */
+	/* at virt(pdpe.addr) + 8 * pdoffset */
+	/* does not exist unless pdpe.present */
+	/* pde.ps == 0 means 4 KB pages and the next level exists if present */
+	/* pde.ps == 1 means 2 MB pages, the pt level doesn't exist and
+	 * offset = vaddr & 0x1fffff; i.e. low 21 bits
+	 * physical page address = pde.addr << 12 with the offset bits masked off
+	 */
+
+	struct pte pte; /* entry in page table */
+	/* at virt(pde.addr) + 8 * ptoffset */
+	/* does not exist unless pde.present */
+	/* offset = vaddr & 0xfff; i.e. low 12 bits */
+	/* physical page address = pte.addr << 12, i.e. entry & 0xffffffffff000 */
+
+	struct pte *pteptr; /* a pointer to the actual entry */
+
+	/* actual physical addresses */
+	paddr_t pdpt_phys;
+	paddr_t pdt_phys;
+	paddr_t pt_phys;
+
+	int present; /* physical address actually mapped */
+	paddr_t paddr; /* decoded physical address */
+	int level;
+	paddr_t page; /* physical address of page */
+	size_t pagesize;
+};
+
+void decode_vaddr(struct vaddr_decode *ds, uint64_t space, vaddr_t vaddr);
+void print_decode(struct vaddr_decode *ds, uint64_t space, vaddr_t vaddr);
+
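+/*
+ * Illustrative sketch, not necessarily the project's decode_vaddr(): how the
+ * four table indices in struct vaddr_decode are carved out of a canonical
+ * x86-64 virtual address with 4-level paging.  The helper name is made up.
+ */
+#if 0
+static void vaddr_indices_sketch(vaddr_t va, struct vaddr_decode *ds)
+{
+	ds->pml4offset = (va >> 39) & 0x1ff;	/* bits 47..39 */
+	ds->pdpoffset  = (va >> 30) & 0x1ff;	/* bits 38..30 */
+	ds->pdoffset   = (va >> 21) & 0x1ff;	/* bits 29..21 */
+	ds->ptoffset   = (va >> 12) & 0x1ff;	/* bits 20..12 */
+	ds->offset     = va & 0xfff;		/* low 12 bits, for 4 KB pages */
+}
+#endif
+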
+/* returns addr. addr may be zero, in which case the function allocates virtual space */
+void *map_page(uint64_t space, vaddr_t addr, paddr_t pa, unsigned int flags);
+void vmapz(uint64_t space, vaddr_t addr, size_t length, unsigned int flags);
+
+#define PT_PRESENT 0x1
+#define MEM_PCD (1<<4)
+#define MEM_PWT (1<<3)
+#define MEM_USER (1<<2)
+#define MEM_RW (1<<1)
+#define MAP_TRACE (1<<16)
+
+#define MEM_NOCACHE (MEM_PCD | MEM_PWT)
+#define MEM_MMIO (PT_PRESENT|MEM_NOCACHE|MEM_RW)
+#define MEM_USERSPACE (MEM_USER | MEM_RW)
+
+#if 0
+typedef struct page {
+	u32int present : 1;	// Page present in memory
+	u32int rw : 1;		// Read-only if clear, readwrite if set
+	u32int user : 1;	// Supervisor level only if clear
+	u32int accessed : 1;	// Has the page been accessed since last refresh?
+	u32int dirty : 1;	// Has the page been written to since last refresh?
+	u32int unused : 7;	// Amalgamation of unused and reserved bits
+	u32int frame : 20;	// Frame address (shifted right 12 bits)
+} page_t;
+#endif
+
+struct bios_data_area {
+	uint16_t comaddr[4];
+	uint16_t lpt[3];
+	uint16_t ebda; /* segment; <<4 to get the actual address */
+};
+
+#endif
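+
+/*
+ * Illustrative sketch, not part of the project's code: one plausible way the
+ * map_page()/vmapz() flag macros combine.  The physical and virtual addresses
+ * are only examples, and whether PT_PRESENT must be passed explicitly (and
+ * whether vmapz zero-fills, as its name suggests) are assumptions.
+ */
+#if 0
+static void mapping_example_sketch(void)
+{
+	/* map one device page uncached; 0xfee00000 (local APIC) is just an example */
+	void *regs = map_page(MEM_KERNEL, 0, (paddr_t)0xfee00000, MEM_MMIO);
+	(void)regs;
+
+	/* map a 16 KB kernel region at a made-up fixed virtual address */
+	vmapz(MEM_KERNEL, (vaddr_t)0xffffc00000000000, KB(16), PT_PRESENT | MEM_RW);
+}
+#endif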