// xv6/vm.c
#include "param.h"
#include "types.h"
#include "defs.h"
#include "x86.h"
#include "memlayout.h"
#include "mmu.h"
#include "proc.h"
#include "elf.h"
extern char data[]; // defined in data.S
static pde_t *kpgdir; // for use in scheduler()
struct segdesc gdt[NSEGS];
// Build mappings in the boot page table.
// XXX build a static page table in assembly instead.
static void
pgmap(void *va, void *last, uint pa)
{
pde_t *pde;
pte_t *pgtab;
pte_t *pte;
for(;;){
pde = &kpgdir[PDX(va)];
pde_t pdev = *pde;
if (pdev == 0) {
pgtab = (pte_t *) pgalloc();
*pde = v2p(pgtab) | PTE_P | PTE_W;
} else {
pgtab = (pte_t*)p2v(PTE_ADDR(pdev));
}
pte = &pgtab[PTX(va)];
*pte = pa | PTE_W | PTE_P;
if(va == last)
break;
va += PGSIZE;
pa += PGSIZE;
}
}
// set up a page table to get off the ground
void
pginit(char* (*alloc)(void))
{
uint cr0;
kpgdir = (pde_t *) alloc();
pgmap((void *) 0, (void *) PHYSTOP, 0); // map pa 0 at va 0
pgmap((void *) KERNBASE, (void *) (KERNBASE+PHYSTOP), 0); // map pa 0 at va KERNBASE
pgmap((void*)0xFE000000, 0, 0xFE000000);
switchkvm(); // load kpgdir into cr3
cr0 = rcr0();
cr0 |= CR0_PG;
lcr0(cr0); // paging on
// new gdt
gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
lgdt((void *)v2p(gdt), sizeof(gdt));
loadgs(SEG_KDATA << 3);
loadfs(SEG_KDATA << 3);
loades(SEG_KDATA << 3);
loadds(SEG_KDATA << 3);
loadss(SEG_KDATA << 3);
__asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (SEG_KCODE << 3)); // reload cs
}
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
seginit(void)
{
struct cpu *c;
// Map virtual addresses to linear addresses using identity map.
// Cannot share a CODE descriptor for both kernel and user
// because it would have to have DPL_USR, but the CPU forbids
// an interrupt from CPL=0 to DPL=3.
c = &cpus[cpunum()];
c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, DPL_USER);
c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);
// Map the per-CPU storage holding this CPU's cpu and proc variables.
c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 8, 0);
lgdt(c->gdt, sizeof(c->gdt));
loadgs(SEG_KCPU << 3);
// Initialize cpu-local storage.
cpu = c;
proc = 0;
}
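// A note on the cpu-local storage set up above (a sketch based on
// this era's proc.h): with SEG_KCPU loaded in %gs, the globals cpu
// and proc compile to %gs-relative accesses, declared roughly as:
//
//   extern struct cpu *cpu asm("%gs:0");    // &cpus[cpunum()]
//   extern struct proc *proc asm("%gs:4");  // cpus[cpunum()].proc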
// Return the address of the PTE in page table pgdir
// that corresponds to linear address va. If create!=0,
// create any required page table pages.
static pte_t *
walkpgdir(pde_t *pgdir, const void *va, int create)
{
pde_t *pde;
pte_t *pgtab;
pde = &pgdir[PDX(va)];
if(*pde & PTE_P){
pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
} else {
if(!create || (pgtab = (pte_t*)kalloc()) == 0)
return 0;
// Make sure all those PTE_P bits are zero.
memset(pgtab, 0, PGSIZE);
// The permissions here are overly generous, but they can
// be further restricted by the permissions in the page table
// entries, if necessary.
*pde = v2p(pgtab) | PTE_P | PTE_W | PTE_U;
}
return &pgtab[PTX(va)];
}
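// Illustrative sketch (not part of xv6): combining walkpgdir with
// PTE_ADDR translates a virtual address by hand. The helper name
// va2pa is hypothetical.
//
//   static uint
//   va2pa(pde_t *pgdir, const void *va)
//   {
//     pte_t *pte = walkpgdir(pgdir, va, 0);
//     if(pte == 0 || !(*pte & PTE_P))
//       return (uint)-1;  // unmapped
//     return PTE_ADDR(*pte) | ((uint)va & 0xFFF);
//   }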
// Create PTEs for linear addresses starting at la that refer to
// physical addresses starting at pa. la and size might not
// be page-aligned.
static int
mappages(pde_t *pgdir, void *la, uint size, uint pa, int perm)
{
char *a, *last;
pte_t *pte;
a = PGROUNDDOWN(la);
last = PGROUNDDOWN(la + size - 1);
for(;;){
pte = walkpgdir(pgdir, a, 1);
if(pte == 0)
return -1;
if(*pte & PTE_P)
panic("remap");
*pte = pa | perm | PTE_P;
if(a == last)
break;
a += PGSIZE;
pa += PGSIZE;
}
return 0;
}
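// Example use (hypothetical device address): map one page of a
// memory-mapped device at its physical address, writable and
// kernel-only:
//
//   mappages(pgdir, (void*)0xFEE00000, PGSIZE, 0xFEE00000, PTE_W);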
// The mappings from logical to linear are one to one (i.e.,
// segmentation doesn't do anything).
// There is one page table per process, plus one that's used
// when a CPU is not running any process (kpgdir).
// A user process uses the same page table as the kernel; the
// page protection bits prevent it from using anything other
// than its own memory.
//
// setupkvm() and exec() set up every page table like this:
//   0..USERTOP                     : user memory (text, data, stack, heap),
//                                    mapped to some free physical memory
//   KERNBASE+640K..KERNBASE+1M     : mapped to 640K..1M (I/O space)
//   KERNBASE+1M..KERNBASE+end      : mapped to 1M..end (kernel text, rodata)
//   KERNBASE+end..KERNBASE+PHYSTOP : mapped to end..PHYSTOP (free memory)
//   0xFE000000..0                  : mapped direct (devices such as ioapic)
//
// The kernel allocates memory for its heap and for user memory
// between kernend and the end of physical memory (PHYSTOP).
// The virtual address space of each user program includes the kernel
// (which is inaccessible in user mode). The user program sits at
// the bottom of the address space, and the kernel at the top at KERNBASE.
static struct kmap {
void *l;
uint p;
uint e;
int perm;
} kmap[] = {
{ P2V(IOSPACEB), IOSPACEB, IOSPACEE, PTE_W}, // I/O space
{ (void *)KERNLINK, V2P(KERNLINK), V2P(data), 0}, // kernel text, rodata
{ data, V2P(data), PHYSTOP, PTE_W}, // kernel data, memory
{ (void*)0xFE000000, 0xFE000000, 0, PTE_W}, // device mappings
};
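// Worked example of the two-level translation behind these mappings
// (assuming 4 KB pages and KERNBASE = 0x80000000): for
// va = KERNBASE + 1MB = 0x80100000,
//   PDX(va) = va >> 22           = 0x200  (page directory slot 512)
//   PTX(va) = (va >> 12) & 0x3FF = 0x100  (page table slot 256)
//   offset  = va & 0xFFF         = 0
// and per the table above the PTE holds physical address 0x00100000.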
// Set up kernel part of a page table.
pde_t*
setupkvm(void)
{
pde_t *pgdir;
struct kmap *k;
if((pgdir = (pde_t*)kalloc()) == 0)
return 0;
memset(pgdir, 0, PGSIZE);
  for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
if(mappages(pgdir, k->l, k->e - k->p, (uint)k->p, k->perm) < 0)
return 0;
return pgdir;
}
// Allocate one page table for the kernel address space,
// used by the scheduler when no process is running.
void
kvmalloc(void)
{
kpgdir = setupkvm();
switchkvm();
}
// Turn on paging.
void
vmenable(void)
{
uint cr0;
switchkvm(); // load kpgdir into cr3
cr0 = rcr0();
cr0 |= CR0_PG;
lcr0(cr0);
struct cpu *c = &cpus[0];
lgdt((void *)v2p((void *)(c->gdt)), sizeof(c->gdt));
loadgs(SEG_KCPU << 3);
loadfs(SEG_KDATA << 3);
loades(SEG_KDATA << 3);
loadds(SEG_KDATA << 3);
loadss(SEG_KDATA << 3);
__asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (SEG_KCODE << 3)); // reload cs
}
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
lcr3(v2p(kpgdir)); // switch to the kernel page table
}
// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
pushcli();
cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
cpu->gdt[SEG_TSS].s = 0;
cpu->ts.ss0 = SEG_KDATA << 3;
cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
ltr(SEG_TSS << 3);
if(p->pgdir == 0)
panic("switchuvm: no pgdir");
lcr3(v2p(p->pgdir)); // switch to new address space
popcli();
}
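// Example caller (a sketch of scheduler() in proc.c): the scheduler
// switches into the chosen process's address space before context-
// switching to it, and back to kpgdir afterward:
//
//   switchuvm(p);
//   p->state = RUNNING;
//   swtch(&cpu->scheduler, proc->context);
//   switchkvm();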
// Load the initcode into address 0 of pgdir.
// sz must be less than a page.
void
inituvm(pde_t *pgdir, char *init, uint sz)
{
char *mem;
if(sz >= PGSIZE)
panic("inituvm: more than a page");
mem = kalloc();
memset(mem, 0, PGSIZE);
mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U);
memmove(mem, init, sz);
}
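// Example caller (a sketch of userinit() in proc.c): the first
// process's image is the initcode binary linked into the kernel:
//
//   inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);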
// Load a program segment into pgdir. addr must be page-aligned
// and the pages from addr to addr+sz must already be mapped.
int
loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
{
uint i, pa, n;
pte_t *pte;
if((uint)addr % PGSIZE != 0)
panic("loaduvm: addr must be page aligned");
for(i = 0; i < sz; i += PGSIZE){
if((pte = walkpgdir(pgdir, addr+i, 0)) == 0)
panic("loaduvm: address should exist");
pa = PTE_ADDR(*pte);
if(sz - i < PGSIZE)
n = sz - i;
else
n = PGSIZE;
if(readi(ip, p2v(pa), offset+i, n) != n)
return -1;
}
return 0;
}
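// Example caller (a sketch of exec(); field names assume this era's
// struct proghdr): each ELF program segment is read in at its
// virtual address after allocuvm() has mapped that range:
//
//   if(loaduvm(pgdir, (char*)ph.vaddr, ip, ph.off, ph.filesz) < 0)
//     goto bad;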
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
int
allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
char *mem;
uint a;
if(newsz > USERTOP)
return 0;
if(newsz < oldsz)
return oldsz;
a = PGROUNDUP(oldsz);
for(; a < newsz; a += PGSIZE){
mem = kalloc();
if(mem == 0){
cprintf("allocuvm out of memory\n");
deallocuvm(pgdir, newsz, oldsz);
return 0;
}
memset(mem, 0, PGSIZE);
    if(mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0){
      cprintf("allocuvm out of memory (2)\n");
      deallocuvm(pgdir, newsz, oldsz);
      kfree(mem);
      return 0;
    }
}
return newsz;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
int
deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
pte_t *pte;
uint a, pa;
if(newsz >= oldsz)
return oldsz;
a = PGROUNDUP(newsz);
for(; a < oldsz; a += PGSIZE){
pte = walkpgdir(pgdir, (char*)a, 0);
if(pte && (*pte & PTE_P) != 0){
pa = PTE_ADDR(*pte);
if(pa == 0)
panic("kfree");
char *v = p2v(pa);
kfree(v);
*pte = 0;
}
}
return newsz;
}
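// Sketch of how growproc() in proc.c uses the pair above to
// implement sbrk(n), growing or shrinking the process image:
//
//   if(n > 0){
//     if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
//       return -1;
//   } else if(n < 0){
//     if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
//       return -1;
//   }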
// Free a page table and all the physical memory pages
// in the user part.
void
freevm(pde_t *pgdir)
{
uint i;
if(pgdir == 0)
panic("freevm: no pgdir");
deallocuvm(pgdir, USERTOP, 0);
for(i = 0; i < NPDENTRIES; i++){
if(pgdir[i] & PTE_P) {
char * v = p2v(PTE_ADDR(pgdir[i]));
kfree(v);
}
}
kfree((char*)pgdir);
}
// Given a parent process's page table, create a copy
// of it for a child.
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
pde_t *d;
pte_t *pte;
uint pa, i;
char *mem;
if((d = setupkvm()) == 0)
return 0;
for(i = 0; i < sz; i += PGSIZE){
if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0)
panic("copyuvm: pte should exist");
if(!(*pte & PTE_P))
panic("copyuvm: page not present");
pa = PTE_ADDR(*pte);
if((mem = kalloc()) == 0)
goto bad;
memmove(mem, (char*)p2v(pa), PGSIZE);
if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0)
goto bad;
}
return d;
bad:
freevm(d);
return 0;
}
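// Example caller (a sketch of fork() in proc.c): the child gets a
// complete copy of the parent's address space:
//
//   if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
//     // free the child's kernel stack and fail the fork
//   }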
//PAGEBREAK!
// Map user virtual address to kernel address.
char*
uva2ka(pde_t *pgdir, char *uva)
{
pte_t *pte;
  pte = walkpgdir(pgdir, uva, 0);
  if(pte == 0 || (*pte & PTE_P) == 0)
return 0;
if((*pte & PTE_U) == 0)
return 0;
return (char*)p2v(PTE_ADDR(*pte));
}
// Copy len bytes from p to user address va in page table pgdir.
// Most useful when pgdir is not the current page table.
// uva2ka ensures this only works for PTE_U pages.
int
copyout(pde_t *pgdir, uint va, void *p, uint len)
{
char *buf, *pa0;
uint n, va0;
buf = (char*)p;
while(len > 0){
va0 = (uint)PGROUNDDOWN(va);
pa0 = uva2ka(pgdir, (char*)va0);
if(pa0 == 0)
return -1;
n = PGSIZE - (va - va0);
if(n > len)
n = len;
memmove(pa0 + (va - va0), buf, n);
len -= n;
buf += n;
va = va0 + PGSIZE;
}
return 0;
}
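// Example use (a sketch of exec() pushing an argument string onto
// the new user stack before switching page tables; names are from
// exec's locals):
//
//   sp = (sp - (strlen(argv[argc]) + 1)) & ~3;
//   if(copyout(pgdir, sp, argv[argc], strlen(argv[argc]) + 1) < 0)
//     goto bad;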