Use static page table for boot, mapping first 4Mbyte; no more segment trick

Allocate proper kernel page table immediately in main using boot allocator
Remove pginit
Simplify address space layout a tiny bit
More to come (e.g., superpages to simplify static table)
This commit is contained in:
Frans Kaashoek 2011-08-09 21:37:35 -04:00
parent 3a03810643
commit 66ba8079c7
9 changed files with 1101 additions and 103 deletions

View file

@@ -72,7 +72,8 @@ AS = $(TOOLPREFIX)gas
LD = $(TOOLPREFIX)ld
OBJCOPY = $(TOOLPREFIX)objcopy
OBJDUMP = $(TOOLPREFIX)objdump
CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer
#CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer
CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -Wall -MD -ggdb -m32 -Werror -fno-omit-frame-pointer
CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
ASFLAGS = -m32 -gdwarf-2 -Wa,-divide
# FreeBSD ld wants ``elf_i386_fbsd''
@@ -198,7 +199,7 @@ QEMUGDB = $(shell if $(QEMU) -help | grep -q '^-gdb'; \
then echo "-gdb tcp::$(GDBPORT)"; \
else echo "-s -p $(GDBPORT)"; fi)
ifndef CPUS
CPUS := 2
CPUS := 1
endif
QEMUOPTS = -hdb fs.img xv6.img -smp $(CPUS) -m 512

5
defs.h
View file

@@ -62,7 +62,7 @@ extern uchar ioapicid;
void ioapicinit(void);
// kalloc.c
char* pgalloc(void);
char* boot_alloc(void);
char* kalloc(void);
void kfree(char*);
void kinit(void);
@@ -161,11 +161,10 @@ void uartintr(void);
void uartputc(int);
// vm.c
void pginit(char* (*alloc)());
void seginit(void);
void kvmalloc(void);
void vmenable(void);
pde_t* setupkvm(void);
pde_t* setupkvm(char* (*alloc)());
char* uva2ka(pde_t*, char*);
int allocuvm(pde_t*, uint, uint);
int deallocuvm(pde_t*, uint, uint);

2
exec.c
View file

@@ -29,7 +29,7 @@ exec(char *path, char **argv)
if(elf.magic != ELF_MAGIC)
goto bad;
if((pgdir = setupkvm()) == 0)
if((pgdir = setupkvm(kalloc)) == 0)
goto bad;
// Load program into memory.

View file

@@ -23,11 +23,13 @@ char *newend;
// simple page allocator to get off the ground during boot
char *
pgalloc(void)
boot_alloc(void)
{
if (newend == 0)
newend = end;
if ((uint) newend >= KERNBASE + 0x400000)
panic("only first 4Mbyte are mapped during boot");
void *p = (void*)PGROUNDUP((uint)newend);
memset(p, 0, PGSIZE);
newend = newend + PGSIZE;

1062
main.c

File diff suppressed because it is too large Load diff

View file

@@ -5,8 +5,7 @@
#define KSTKSIZE (8*PGSIZE) // size of a kernel stack
#define IOSPACEB 0x0A0000 // begin IO space
#define IOSPACEE 0x100000 // end IO space
#define DEVSPACE 0xFE000000 // other devices are in the top of the phys address space
#define PHYSTOP 0xE000000 // use phys mem up to here as free pool
// Key addresses for address space layout (see kmap in vm.c for the layout)

View file

@@ -41,8 +41,8 @@ multiboot_header:
# boot loader - bootasm.S - sets up.
.globl multiboot_entry
multiboot_entry:
lgdt V2P_WO(gdtdesc)
ljmp $(SEG_KCODE<<3), $mbstart32
# lgdt V2P_WO(gdtdesc)
# ljmp $(SEG_KCODE<<3), $mbstart32
mbstart32:
# Set up the protected-mode data segment registers
@@ -54,21 +54,22 @@ mbstart32:
movw %ax, %fs # -> FS
movw %ax, %gs # -> GS
movl $(V2P_WO(bootpgdir)), %eax
movl %eax, %cr3
# Turn on paging.
movl %cr0, %eax
orl $(CR0_PE|CR0_PG|CR0_WP), %eax
movl %eax, %cr0
# now switch to using addresses above KERNBASE
# call addresses are pc-relative so we jump though this hoop:
mov $relocated, %eax
jmp *%eax
relocated:
# Set up the stack pointer and call into C.
movl $(stack + STACK), %esp
call main
spin:
jmp spin
# Bootstrap GDT
.p2align 2 # force 4 byte alignment
gdt:
SEG_NULLASM # null seg
SEG_ASM(STA_X|STA_R, -KERNBASE, 0xffffffff) # code seg
SEG_ASM(STA_W, -KERNBASE, 0xffffffff) # data seg
gdtdesc:
.word (gdtdesc - gdt - 1) # sizeof(gdt) - 1
.long V2P_WO(gdt) # address gdt
.comm stack, STACK

2
proc.c
View file

@@ -83,7 +83,7 @@ userinit(void)
p = allocproc();
initproc = p;
if((p->pgdir = setupkvm()) == 0)
if((p->pgdir = setupkvm(kalloc)) == 0)
panic("userinit: out of memory?");
inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
p->sz = PGSIZE;

94
vm.c
View file

@@ -11,64 +11,6 @@ extern char data[]; // defined in data.S
static pde_t *kpgdir; // for use in scheduler()
struct segdesc gdt[NSEGS];
// page map for during boot
// XXX build a static page table in assembly
static void
pgmap(void *va, void *last, uint pa)
{
pde_t *pde;
pte_t *pgtab;
pte_t *pte;
for(;;){
pde = &kpgdir[PDX(va)];
pde_t pdev = *pde;
if (pdev == 0) {
pgtab = (pte_t *) pgalloc();
*pde = v2p(pgtab) | PTE_P | PTE_W;
} else {
pgtab = (pte_t*)p2v(PTE_ADDR(pdev));
}
pte = &pgtab[PTX(va)];
*pte = pa | PTE_W | PTE_P;
if(va == last)
break;
va += PGSIZE;
pa += PGSIZE;
}
}
// set up a page table to get off the ground
void
pginit(char* (*alloc)(void))
{
uint cr0;
kpgdir = (pde_t *) alloc();
pgmap((void *) 0, (void *) PHYSTOP, 0); // map pa 0 at va 0
pgmap((void *) KERNBASE, (void *) (KERNBASE+PHYSTOP), 0); // map pa 0 at va KERNBASE
pgmap((void*)0xFE000000, 0, 0xFE000000);
switchkvm(); // load kpgdir into cr3
cr0 = rcr0();
cr0 |= CR0_PG;
lcr0(cr0); // paging on
// new gdt
gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
lgdt((void *)v2p(gdt), sizeof(gdt));
loadgs(SEG_KDATA << 3);
loadfs(SEG_KDATA << 3);
loades(SEG_KDATA << 3);
loadds(SEG_KDATA << 3);
loadss(SEG_KDATA << 3);
__asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (SEG_KCODE << 3)); // reload cs
}
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
@@ -101,7 +43,7 @@ seginit(void)
// that corresponds to linear address va. If create!=0,
// create any required page table pages.
static pte_t *
walkpgdir(pde_t *pgdir, const void *va, int create)
walkpgdir(pde_t *pgdir, const void *va, int create, char* (*alloc)(void))
{
pde_t *pde;
pte_t *pgtab;
@@ -110,7 +52,7 @@ walkpgdir(pde_t *pgdir, const void *va, int create)
if(*pde & PTE_P){
pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
} else {
if(!create || (pgtab = (pte_t*)kalloc()) == 0)
if(!create || (pgtab = (pte_t*)alloc()) == 0)
return 0;
// Make sure all those PTE_P bits are zero.
memset(pgtab, 0, PGSIZE);
@@ -126,7 +68,7 @@ walkpgdir(pde_t *pgdir, const void *va, int create)
// physical addresses starting at pa. la and size might not
// be page-aligned.
static int
mappages(pde_t *pgdir, void *la, uint size, uint pa, int perm)
mappages(pde_t *pgdir, void *la, uint size, uint pa, int perm, char* (*alloc)(void))
{
char *a, *last;
pte_t *pte;
@@ -134,7 +76,7 @@ mappages(pde_t *pgdir, void *la, uint size, uint pa, int perm)
a = PGROUNDDOWN(la);
last = PGROUNDDOWN(la + size - 1);
for(;;){
pte = walkpgdir(pgdir, a, 1);
pte = walkpgdir(pgdir, a, 1, alloc);
if(pte == 0)
return -1;
if(*pte & PTE_P)
@@ -175,25 +117,25 @@ static struct kmap {
uint e;
int perm;
} kmap[] = {
{ P2V(IOSPACEB), IOSPACEB, IOSPACEE, PTE_W}, // I/O space
{ P2V(0), 0, 1024*1024, PTE_W}, // First 1Mbyte contains BIOS and IO section
{ (void *)KERNLINK, V2P(KERNLINK), V2P(data), 0}, // kernel text, rodata
{ data, V2P(data), PHYSTOP, PTE_W}, // kernel data, memory
{ (void*)0xFE000000, 0xFE000000, 0, PTE_W}, // device mappings
{ (void*)DEVSPACE, DEVSPACE, 0, PTE_W}, // device mappings
};
// Set up kernel part of a page table.
pde_t*
setupkvm(void)
setupkvm(char* (*alloc)(void))
{
pde_t *pgdir;
struct kmap *k;
if((pgdir = (pde_t*)kalloc()) == 0)
if((pgdir = (pde_t*)alloc()) == 0)
return 0;
memset(pgdir, 0, PGSIZE);
k = kmap;
for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
if(mappages(pgdir, k->l, k->e - k->p, (uint)k->p, k->perm) < 0)
if(mappages(pgdir, k->l, k->e - k->p, (uint)k->p, k->perm, alloc) < 0)
return 0;
return pgdir;
@@ -204,7 +146,7 @@ setupkvm(void)
void
kvmalloc(void)
{
kpgdir = setupkvm();
kpgdir = setupkvm(boot_alloc);
switchkvm();
}
@@ -265,7 +207,7 @@ inituvm(pde_t *pgdir, char *init, uint sz)
panic("inituvm: more than a page");
mem = kalloc();
memset(mem, 0, PGSIZE);
mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U);
mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
memmove(mem, init, sz);
}
@@ -280,7 +222,7 @@ loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
if((uint)addr % PGSIZE != 0)
panic("loaduvm: addr must be page aligned");
for(i = 0; i < sz; i += PGSIZE){
if((pte = walkpgdir(pgdir, addr+i, 0)) == 0)
if((pte = walkpgdir(pgdir, addr+i, 0, kalloc)) == 0)
panic("loaduvm: address should exist");
pa = PTE_ADDR(*pte);
if(sz - i < PGSIZE)
@@ -315,7 +257,7 @@ allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
return 0;
}
memset(mem, 0, PGSIZE);
mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U);
mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
}
return newsz;
}
@@ -335,7 +277,7 @@ deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
a = PGROUNDUP(newsz);
for(; a < oldsz; a += PGSIZE){
pte = walkpgdir(pgdir, (char*)a, 0);
pte = walkpgdir(pgdir, (char*)a, 0, kalloc);
if(pte && (*pte & PTE_P) != 0){
pa = PTE_ADDR(*pte);
if(pa == 0)
@@ -377,10 +319,10 @@ copyuvm(pde_t *pgdir, uint sz)
uint pa, i;
char *mem;
if((d = setupkvm()) == 0)
if((d = setupkvm(kalloc)) == 0)
return 0;
for(i = 0; i < sz; i += PGSIZE){
if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0)
if((pte = walkpgdir(pgdir, (void*)i, 0, kalloc)) == 0)
panic("copyuvm: pte should exist");
if(!(*pte & PTE_P))
panic("copyuvm: page not present");
@@ -388,7 +330,7 @@ copyuvm(pde_t *pgdir, uint sz)
if((mem = kalloc()) == 0)
goto bad;
memmove(mem, (char*)p2v(pa), PGSIZE);
if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0)
if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc) < 0)
goto bad;
}
return d;
@@ -405,7 +347,7 @@ uva2ka(pde_t *pgdir, char *uva)
{
pte_t *pte;
pte = walkpgdir(pgdir, uva, 0);
pte = walkpgdir(pgdir, uva, 0, kalloc);
if((*pte & PTE_P) == 0)
return 0;
if((*pte & PTE_U) == 0)