eliminate enter_alloc -- use kalloc for everything

Robert Morris 2011-09-13 13:14:52 -04:00
parent 7e7cb106d0
commit c092540e39
4 changed files with 53 additions and 51 deletions
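
Condensed, the boot path after this change looks as follows. This is a sketch assembled from the main.c hunks below, not a verbatim copy: kinit1() seeds the free list with only the pages that entrypgdir maps, and kinit2() adds the remaining physical pages once the full kernel page table is in place on all cores.

  // Sketch: post-commit initialization order (condensed from main.c).
  int
  main(void)
  {
    kinit1(end, P2V(4*1024*1024));   // free pages below 4MB; no locking yet
    kvmalloc();                      // setupkvm() now draws pages from kalloc()
    // ... device, interrupt, and process-table setup elided ...
    startothers();                   // AP stacks via kalloc(), still below 4MB
    kinit2(P2V(4*1024*1024), P2V(PHYSTOP)); // free the rest; enable locking
    userinit();                      // first user process
    mpmain();
  }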

defs.h (7 lines changed)

@@ -62,11 +62,10 @@ extern uchar ioapicid;
 void ioapicinit(void);
 
 // kalloc.c
-char* enter_alloc(void);
 char* kalloc(void);
 void kfree(char*);
-void kinit(void);
-uint detect_memory(void);
+void kinit1(void*, void*);
+void kinit2(void*, void*);
 
 // kbd.c
 void kbdintr(void);
@@ -165,7 +164,7 @@ void uartputc(int);
 void seginit(void);
 void kvmalloc(void);
 void vmenable(void);
-pde_t* setupkvm(char* (*alloc)());
+pde_t* setupkvm();
 char* uva2ka(pde_t*, char*);
 int allocuvm(pde_t*, uint, uint);
 int deallocuvm(pde_t*, uint, uint);

kalloc.c (57 lines changed)

@@ -9,42 +9,45 @@
 #include "mmu.h"
 #include "spinlock.h"
 
+void freerange(void *vstart, void *vend);
+extern char end[]; // first address after kernel loaded from ELF file
+
 struct run {
   struct run *next;
 };
 
 struct {
   struct spinlock lock;
+  int use_lock;
   struct run *freelist;
 } kmem;
 
-extern char end[]; // first address after kernel loaded from ELF file
-static char *newend;
-
-// A simple page allocator to get off the ground during entry
-char *
-enter_alloc(void)
+// Initialization happens in two phases.
+// 1. main() calls kinit1() while still using entrypgdir to place just
+// the pages mapped by entrypgdir on free list.
+// 2. main() calls kinit2() with the rest of the physical pages
+// after installing a full page table that maps them on all cores.
+void
+kinit1(void *vstart, void *vend)
 {
-  if (newend == 0)
-    newend = end;
-
-  if ((uint) newend >= KERNBASE + 0x400000)
-    panic("only first 4Mbyte are mapped during entry");
-  void *p = (void*)PGROUNDUP((uint)newend);
-  memset(p, 0, PGSIZE);
-  newend = newend + PGSIZE;
-  return p;
+  initlock(&kmem.lock, "kmem");
+  kmem.use_lock = 0;
+  freerange(vstart, vend);
 }
 
-// Initialize free list of physical pages.
 void
-kinit(void)
+kinit2(void *vstart, void *vend)
+{
+  freerange(vstart, vend);
+  kmem.use_lock = 1;
+}
+
+void
+freerange(void *vstart, void *vend)
 {
   char *p;
-
-  initlock(&kmem.lock, "kmem");
-  p = (char*)PGROUNDUP((uint)newend);
-  for(; p + PGSIZE <= (char*)p2v(PHYSTOP); p += PGSIZE)
+  p = (char*)PGROUNDUP((uint)vstart);
+  for(; p + PGSIZE <= (char*)vend; p += PGSIZE)
     kfree(p);
 }
@@ -64,11 +67,13 @@ kfree(char *v)
   // Fill with junk to catch dangling refs.
   memset(v, 1, PGSIZE);
 
-  acquire(&kmem.lock);
+  if(kmem.use_lock)
+    acquire(&kmem.lock);
   r = (struct run*)v;
   r->next = kmem.freelist;
   kmem.freelist = r;
-  release(&kmem.lock);
+  if(kmem.use_lock)
+    release(&kmem.lock);
 }
 
 // Allocate one 4096-byte page of physical memory.
@@ -79,11 +84,13 @@ kalloc(void)
 {
   struct run *r;
 
-  acquire(&kmem.lock);
+  if(kmem.use_lock)
+    acquire(&kmem.lock);
   r = kmem.freelist;
   if(r)
     kmem.freelist = r->next;
-  release(&kmem.lock);
+  if(kmem.use_lock)
+    release(&kmem.lock);
   return (char*)r;
 }
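
The use_lock flag is what lets kfree() and kalloc() run during early boot, before taking a spinlock is safe, and then switch to locked operation once other processors can contend for the free list. The pattern is easy to study outside the kernel; the following user-space sketch reproduces it with a pthread mutex standing in for xv6's spinlock (the arena, page count, and main() driver are illustrative assumptions, not part of the commit):

  #include <pthread.h>
  #include <stdio.h>
  #include <string.h>

  #define PGSIZE 4096
  #define NPAGES 16

  struct run { struct run *next; };

  static struct {
    pthread_mutex_t lock;   // stands in for xv6's struct spinlock
    int use_lock;           // 0 during single-threaded init, 1 afterwards
    struct run *freelist;
  } kmem = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

  // Fake "physical memory", page-aligned like the real thing.
  static _Alignas(PGSIZE) char arena[NPAGES * PGSIZE];

  void kfree(char *v) {
    memset(v, 1, PGSIZE);             // junk-fill to catch dangling refs
    if (kmem.use_lock) pthread_mutex_lock(&kmem.lock);
    struct run *r = (struct run*)v;
    r->next = kmem.freelist;          // push page onto the free list
    kmem.freelist = r;
    if (kmem.use_lock) pthread_mutex_unlock(&kmem.lock);
  }

  char *kalloc(void) {
    if (kmem.use_lock) pthread_mutex_lock(&kmem.lock);
    struct run *r = kmem.freelist;    // pop the first free page, if any
    if (r) kmem.freelist = r->next;
    if (kmem.use_lock) pthread_mutex_unlock(&kmem.lock);
    return (char*)r;
  }

  int main(void) {
    // "kinit1" phase: single-threaded, lock-free seeding of the free list.
    for (int i = 0; i < NPAGES; i++)
      kfree(arena + i * PGSIZE);
    // "kinit2" phase: from here on, allocations take the lock.
    kmem.use_lock = 1;
    printf("first page at %p\n", (void*)kalloc());
    return 0;
  }

The key property mirrors the diff above: every allocator entry point checks use_lock, so the single-threaded kinit1 phase pays no synchronization cost and the post-kinit2 phase is safe under concurrency.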

main.c (15 lines changed)

@@ -9,6 +9,7 @@
 static void startothers(void);
 static void mpmain(void)  __attribute__((noreturn));
 extern pde_t *kpgdir;
+extern char end[]; // first address after kernel loaded from ELF file
 
 // Bootstrap processor starts running C code here.
 // Allocate a real stack and switch to it, first
@@ -16,6 +17,7 @@ extern pde_t *kpgdir;
 int
 main(void)
 {
+  kinit1(end, P2V(4*1024*1024)); // phys page allocator
   kvmalloc();      // kernel page table
   mpinit();        // collect info about this machine
   lapicinit(mpbcpu());
@@ -33,9 +35,9 @@ main(void)
   ideinit();       // disk
   if(!ismp)
     timerinit();   // uniprocessor timer
-  startothers();   // start other processors (must come before kinit)
-  kinit();         // initialize memory allocator
-  userinit();      // first user process (must come after kinit)
+  startothers();   // start other processors
+  kinit2(P2V(4*1024*1024), P2V(PHYSTOP)); // must come after startothers()
+  userinit();      // first user process
 
   // Finish setting up this processor in mpmain.
   mpmain();
 }
@@ -84,12 +86,7 @@ startothers(void)
     // Tell entryother.S what stack to use, where to enter, and what
     // pgdir to use. We cannot use kpgdir yet, because the AP processor
     // is running in low memory, so we use entrypgdir for the APs too.
-    // kalloc can return addresses above 4Mbyte (the machine may have
-    // much more physical memory than 4Mbyte), which aren't mapped by
-    // entrypgdir, so we must allocate a stack using enter_alloc();
-    // this introduces the constraint that xv6 cannot use kalloc until
-    // after these last enter_alloc invocations.
-    stack = enter_alloc();
+    stack = kalloc();
     *(void**)(code-4) = stack + KSTACKSIZE;
     *(void**)(code-8) = mpenter;
     *(int**)(code-12) = (void *) v2p(entrypgdir);
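
The "must come after startothers()" comment encodes an invariant: until an AP loads kpgdir it runs on entrypgdir, which maps only the first 4MB of physical memory, so the AP stacks handed out by kalloc() in startothers() must lie in that window. kinit1() guarantees this by freeing only pages below P2V(4*1024*1024); kinit2() releases the higher pages only after the APs are up. A hypothetical check (not in the commit) that would make the invariant explicit at the allocation site:

    stack = kalloc();
    // Hypothetical assertion: before kinit2(), kalloc() can only return
    // pages that entrypgdir maps, i.e. below KERNBASE + 4MB.
    if(stack == 0 || (uint)stack + KSTACKSIZE > KERNBASE + 4*1024*1024)
      panic("startothers: AP stack outside entrypgdir mapping");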

vm.c (25 lines changed)

@@ -43,7 +43,7 @@ seginit(void)
 // that corresponds to virtual address va. If alloc!=0,
 // create any required page table pages.
 static pte_t *
-walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
+walkpgdir(pde_t *pgdir, const void *va, int alloc)
 {
   pde_t *pde;
   pte_t *pgtab;
@@ -52,7 +52,7 @@ walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
   if(*pde & PTE_P){
     pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
   } else {
-    if(!alloc || (pgtab = (pte_t*)alloc()) == 0)
+    if(!alloc || (pgtab = (pte_t*)kalloc()) == 0)
       return 0;
     // Make sure all those PTE_P bits are zero.
     memset(pgtab, 0, PGSIZE);
@@ -68,8 +68,7 @@ walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
 // physical addresses starting at pa. va and size might not
 // be page-aligned.
 static int
-mappages(pde_t *pgdir, void *va, uint size, uint pa,
-         int perm, char* (*alloc)(void))
+mappages(pde_t *pgdir, void *va, uint size, uint pa, int perm)
 {
   char *a, *last;
   pte_t *pte;
@@ -77,7 +76,7 @@ mappages(pde_t *pgdir, void *va, uint size, uint pa,
   a = (char*)PGROUNDDOWN((uint)va);
   last = (char*)PGROUNDDOWN(((uint)va) + size - 1);
   for(;;){
-    if((pte = walkpgdir(pgdir, a, alloc)) == 0)
+    if((pte = walkpgdir(pgdir, a, 1)) == 0)
       return -1;
     if(*pte & PTE_P)
       panic("remap");
@@ -127,19 +126,19 @@ static struct kmap {
 // Set up kernel part of a page table.
 pde_t*
-setupkvm(char* (*alloc)(void))
+setupkvm()
 {
   pde_t *pgdir;
   struct kmap *k;
 
-  if((pgdir = (pde_t*)alloc()) == 0)
+  if((pgdir = (pde_t*)kalloc()) == 0)
     return 0;
   memset(pgdir, 0, PGSIZE);
   if (p2v(PHYSTOP) > (void*)DEVSPACE)
     panic("PHYSTOP too high");
   for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
     if(mappages(pgdir, k->virt, k->phys_end - k->phys_start,
-                (uint)k->phys_start, k->perm, alloc) < 0)
+                (uint)k->phys_start, k->perm) < 0)
       return 0;
   return pgdir;
 }
@@ -149,7 +148,7 @@ setupkvm(char* (*alloc)(void))
 void
 kvmalloc(void)
 {
-  kpgdir = setupkvm(enter_alloc);
+  kpgdir = setupkvm();
   switchkvm();
 }
@@ -188,7 +187,7 @@ inituvm(pde_t *pgdir, char *init, uint sz)
     panic("inituvm: more than a page");
   mem = kalloc();
   memset(mem, 0, PGSIZE);
-  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
+  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U);
   memmove(mem, init, sz);
 }
@@ -238,7 +237,7 @@ allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
       return 0;
     }
     memset(mem, 0, PGSIZE);
-    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
+    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U);
   }
   return newsz;
 }
@@ -315,7 +314,7 @@ copyuvm(pde_t *pgdir, uint sz)
   uint pa, i;
   char *mem;
 
-  if((d = setupkvm(kalloc)) == 0)
+  if((d = setupkvm()) == 0)
     return 0;
   for(i = 0; i < sz; i += PGSIZE){
     if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
@@ -326,7 +325,7 @@ copyuvm(pde_t *pgdir, uint sz)
     if((mem = kalloc()) == 0)
       goto bad;
     memmove(mem, (char*)p2v(pa), PGSIZE);
-    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc) < 0)
+    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0)
       goto bad;
   }
   return d;
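
With a single allocator left, the function-pointer plumbing through vm.c collapses into a boolean: walkpgdir() takes a plain int alloc flag and calls kalloc() itself, and mappages() and setupkvm() each lose a parameter. At the call sites the simplification reads, for example:

  // Before: the caller chose an allocator, since kalloc() was unusable
  // until the last boot-time enter_alloc() call had been made.
  kpgdir = setupkvm(enter_alloc);   // boot path
  d = setupkvm(kalloc);             // copyuvm()

  // After: kalloc() works from kinit1() onward, so the parameter is gone.
  kpgdir = setupkvm();
  d = setupkvm();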