提交 98ae7c9c 创建 作者: Nickolai Zeldovich's avatar Nickolai Zeldovich

fold pgdir into vmap, so threads share hw pt, and unmap shootdown works

上级 1b93142d
......@@ -166,7 +166,6 @@ void uartputc(int);
void seginit(void);
void kvmalloc(void);
void vmenable(void);
pde_t* setupkvm(void);
char* uva2ka(pde_t*, char*);
struct vmnode* vmn_alloc(uint, uint);
struct vmnode* vmn_allocpg(uint);
......@@ -177,13 +176,12 @@ void vmap_decref(struct vmap *);
int vmap_insert(struct vmap *, struct vmnode *n, uint);
int vmap_remove(struct vmap *, uint va_start, uint len);
struct vma * vmap_lookup(struct vmap *, uint);
struct vmap * vmap_copy(struct vmap *, pde_t*, int);
void freevm(pde_t*);
struct vmap * vmap_copy(struct vmap *, int);
void switchuvm(struct proc*);
void switchkvm(void);
int copyout(struct vmap *, uint, void*, uint);
int copyin(struct vmap *, uint, void*, uint);
int pagefault(pde_t*, struct vmap *, uint, uint);
int pagefault(struct vmap *, uint, uint);
void clearpages(pde_t *pgdir, void *begin, void *end);
// number of elements in fixed-size array
......
......@@ -18,7 +18,6 @@ exec(char *path, char **argv)
struct elfhdr elf;
struct inode *ip = 0;
struct proghdr ph;
pde_t *pgdir = 0, *oldpgdir;
struct vmap *vmap = 0, *oldvmap;
struct vmnode *vmn = 0;
int odp = 1;
......@@ -26,7 +25,6 @@ exec(char *path, char **argv)
if((ip = namei(path)) == 0)
return -1;
ilock(ip);
pgdir = 0;
// Check ELF header
if(readi(ip, (char*)&elf, 0, sizeof(elf)) < sizeof(elf))
......@@ -34,9 +32,6 @@ exec(char *path, char **argv)
if(elf.magic != ELF_MAGIC)
goto bad;
if((pgdir = setupkvm()) == 0)
goto bad;
if((vmap = vmap_alloc()) == 0)
goto bad;
......@@ -122,23 +117,18 @@ exec(char *path, char **argv)
safestrcpy(proc->name, last, sizeof(proc->name));
// Commit to the user image.
oldpgdir = proc->pgdir;
oldvmap = proc->vmap;
proc->pgdir = pgdir;
proc->vmap = vmap;
proc->brk = brk + 4; // XXX so that brk-1 points within heap vma..
proc->tf->eip = elf.entry; // main
proc->tf->esp = sp;
switchuvm(proc);
freevm(oldpgdir);
vmap_decref(oldvmap);
return 0;
bad:
cprintf("exec failed\n");
if(pgdir)
freevm(pgdir);
if(ip)
iunlockput(ip);
if(vmap)
......
......@@ -145,8 +145,6 @@ userinit(void)
p = allocproc();
initproc = p;
if((p->pgdir = setupkvm()) == 0)
panic("userinit: out of memory?");
if((p->vmap = vmap_alloc()) == 0)
panic("userinit: out of vmaps?");
struct vmnode *vmn = vmn_allocpg(PGROUNDUP((int)_binary_initcode_size) / PGSIZE);
......@@ -211,17 +209,9 @@ fork(int flags)
if((np = allocproc()) == 0)
return -1;
if((np->pgdir = setupkvm()) == 0){
kfree(np->kstack);
np->kstack = 0;
np->state = UNUSED;
return -1;
}
if(flags == 0) {
// Copy process state from p.
if((np->vmap = vmap_copy(proc->vmap, proc->pgdir, cow)) == 0){
freevm(np->pgdir);
if((np->vmap = vmap_copy(proc->vmap, cow)) == 0){
kfree(np->kstack);
np->kstack = 0;
np->state = UNUSED;
......@@ -328,7 +318,6 @@ wait(void)
SLIST_REMOVE(&proc->childq, p, proc, child_next);
kfree(p->kstack);
p->kstack = 0;
freevm(p->pgdir);
vmap_decref(p->vmap);
p->state = UNUSED;
p->pid = 0;
......
......@@ -58,12 +58,12 @@ struct vmap {
struct spinlock lock; // serialize map/lookup/unmap
uint ref;
uint alloc;
pde_t *pgdir; // Page table
};
// Per-process state
struct proc {
struct vmap *vmap; // va -> vma
pde_t* pgdir; // Page table
uint brk; // Top of heap
char *kstack; // Bottom of kernel stack for this process
enum procstate state; // Process state
......
......@@ -20,9 +20,9 @@
int
fetchint(uint addr, int *ip)
{
if(pagefault(proc->pgdir, proc->vmap, addr, 0) < 0)
if(pagefault(proc->vmap, addr, 0) < 0)
return -1;
if(pagefault(proc->pgdir, proc->vmap, addr+3, 0) < 0)
if(pagefault(proc->vmap, addr+3, 0) < 0)
return -1;
*ip = *(int*)(addr);
return 0;
......@@ -37,7 +37,7 @@ fetchstr(uint addr, char **pp)
char *s = (char *) addr;
while(1){
if(pagefault(proc->pgdir, proc->vmap, (uint) s, 0) < 0)
if(pagefault(proc->vmap, (uint) s, 0) < 0)
return -1;
if(*s == 0){
*pp = (char*)addr;
......@@ -66,7 +66,7 @@ argptr(int n, char **pp, int size)
if(argint(n, &i) < 0)
return -1;
for(uint va = PGROUNDDOWN(i); va < i+size; va = va + PGSIZE)
if(pagefault(proc->pgdir, proc->vmap, va, 0) < 0)
if(pagefault(proc->vmap, va, 0) < 0)
return -1;
*pp = (char*)i;
return 0;
......
......@@ -131,11 +131,11 @@ sys_unmap(void)
if (vmap_remove(proc->vmap, PGROUNDDOWN(addr), PGROUNDUP(len)) < 0)
return -1;
clearpages(proc->pgdir,
clearpages(proc->vmap->pgdir,
(void*) (PGROUNDDOWN(addr)),
(void*) (PGROUNDDOWN(addr)+PGROUNDUP(len)));
cli();
lcr3(PADDR(proc->pgdir));
lcr3(PADDR(proc->vmap->pgdir));
for (uint i = 0; i < ncpu; i++)
if (i != cpu->id)
lapic_tlbflush(i);
......
......@@ -94,7 +94,7 @@ trap(struct trapframe *tf)
}
if(tf->trapno == T_PGFLT){
if(pagefault(proc->pgdir, proc->vmap, rcr2(), tf->err) >= 0){
if(pagefault(proc->vmap, rcr2(), tf->err) >= 0){
return;
}
}
......
......@@ -14,14 +14,6 @@ extern char data[]; // defined in data.S
static pde_t *kpgdir; // for use in scheduler()
// Allocate one page table for the machine for the kernel address
// space for scheduler processes.
void
kvmalloc(void)
{
  // Build the kernel-only page table cached in the file-scope kpgdir
  // ("for use in scheduler()" per the declaration above); no user
  // mappings are installed here.
  // NOTE(review): setupkvm() can return 0 on allocation failure and the
  // result is stored unchecked — confirm callers tolerate a null kpgdir.
  kpgdir = setupkvm();
}
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
......@@ -179,7 +171,7 @@ static struct kmap {
};
// Set up kernel part of a page table.
pde_t*
static pde_t*
setupkvm(void)
{
pde_t *pgdir;
......@@ -196,6 +188,14 @@ setupkvm(void)
return pgdir;
}
// Allocate one page table for the machine for the kernel address
// space for scheduler processes.
//
// The result is cached in the file-scope kpgdir ("for use in
// scheduler()"). setupkvm() returns 0 when it cannot allocate a
// directory page; panic immediately rather than deferring the crash
// to the first dereference of a null kpgdir — this mirrors how
// vmap_alloc() in this file handles the same failure.
void
kvmalloc(void)
{
  kpgdir = setupkvm();
  if(kpgdir == 0)
    panic("kvmalloc: setupkvm failed");
}
// Turn on paging.
void
vmenable(void)
......@@ -226,12 +226,28 @@ switchuvm(struct proc *p)
cpu->ts.ss0 = SEG_KDATA << 3;
cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
ltr(SEG_TSS << 3);
if(p->pgdir == 0)
panic("switchuvm: no pgdir");
lcr3(PADDR(p->pgdir)); // switch to new address space
if(p->vmap == 0 || p->vmap->pgdir == 0)
panic("switchuvm: no vmap/pgdir");
lcr3(PADDR(p->vmap->pgdir)); // switch to new address space
popcli();
}
// Release a hardware page table: free the page-table page referenced
// by every present directory entry, then the directory page itself.
// (The data pages mapped by those tables are not freed here — they are
// presumably owned by vmnodes and released via vmn_decref; confirm.)
static void
freevm(pde_t *pgdir)
{
  if(pgdir == 0)
    panic("freevm: no pgdir");

  for(uint d = 0; d < NPDENTRIES; d++)
    if(pgdir[d] & PTE_P)
      kfree((char*)PTE_ADDR(pgdir[d]));   // page-table page for slot d

  kfree((char*)pgdir);                    // the directory page itself
}
struct {
struct vmnode n[1024];
} vmnodes;
......@@ -341,6 +357,9 @@ vmap_alloc(void)
}
m->lock.name = "vmap";
m->ref = 1;
m->pgdir = setupkvm();
if (m->pgdir == 0)
panic("vmap_alloc: setupkvm out of memory");
return m;
}
}
......@@ -353,6 +372,8 @@ vmap_free(struct vmap *m)
for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++)
if(m->e[i].n)
vmn_decref(m->e[i].n);
freevm(m->pgdir);
m->pgdir = 0;
m->alloc = 0;
}
......@@ -430,7 +451,7 @@ vmap_lookup(struct vmap *m, uint va)
}
struct vmap *
vmap_copy(struct vmap *m, pde_t* pgdir, int share)
vmap_copy(struct vmap *m, int share)
{
struct vmap *c = vmap_alloc();
if(c == 0)
......@@ -446,7 +467,7 @@ vmap_copy(struct vmap *m, pde_t* pgdir, int share)
c->e[i].n = m->e[i].n;
c->e[i].va_type = COW;
m->e[i].va_type = COW;
updatepages(pgdir, (void *) (m->e[i].va_start), (void *) (m->e[i].va_end), PTE_COW);
updatepages(m->pgdir, (void *) (m->e[i].va_start), (void *) (m->e[i].va_end), PTE_COW);
} else {
c->e[i].n = vmn_copy(m->e[i].n);
c->e[i].va_type = m->e[i].va_type;
......@@ -459,7 +480,7 @@ vmap_copy(struct vmap *m, pde_t* pgdir, int share)
__sync_fetch_and_add(&c->e[i].n->ref, 1);
}
if (share)
lcr3(PADDR(pgdir)); // Reload hardware page table
lcr3(PADDR(m->pgdir)); // Reload hardware page table
release(&m->lock);
return c;
......@@ -495,22 +516,6 @@ vmn_load(struct vmnode *vmn, struct inode *ip, uint offset, uint sz)
}
}
// Free a page table: releases each page-table page reachable from a
// present directory entry, then the directory page itself.
// NOTE(review): despite the wording "all the physical memory pages in
// the user part", the loop only frees the page-table pages the PDEs
// point at — the mapped data pages are presumably released elsewhere
// via vmn_decref; confirm against vmap_free.
void
freevm(pde_t *pgdir)
{
  uint i;
  // Freeing a null directory would indicate a double-free or an
  // uninitialized address space; fail loudly.
  if(pgdir == 0)
    panic("freevm: no pgdir");
  for(i = 0; i < NPDENTRIES; i++){
    if(pgdir[i] & PTE_P)
      kfree((char*)PTE_ADDR(pgdir[i]));  // free the page-table page
  }
  kfree((char*)pgdir);  // free the page-directory page itself
}
//PAGEBREAK!
// Map user virtual address to kernel physical address.
char*
......@@ -581,10 +586,10 @@ copyin(struct vmap *vmap, uint va, void *p, uint len)
}
int
pagefault(pde_t *pgdir, struct vmap *vmap, uint va, uint err)
pagefault(struct vmap *vmap, uint va, uint err)
{
pte_t *pte = walkpgdir(pgdir, (const void *)va, 1);
pte_t *pte = walkpgdir(vmap->pgdir, (const void *)va, 1);
if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W))
return 0;
......@@ -606,7 +611,7 @@ pagefault(pde_t *pgdir, struct vmap *vmap, uint va, uint err)
panic("pagefault: couldn't load");
}
acquire(&m->lock);
pte = walkpgdir(pgdir, (const void *)va, 0);
pte = walkpgdir(vmap->pgdir, (const void *)va, 0);
if (pte == 0x0)
panic("pagefault: not paged in???");
// cprintf("ODP done\n");
......@@ -627,8 +632,8 @@ pagefault(pde_t *pgdir, struct vmap *vmap, uint va, uint err)
m->va_type = PRIVATE;
m->n = c;
// Update the hardware page tables to reflect the change to the vma
clearpages(pgdir, (void *) m->va_start, (void *) m->va_end);
pte = walkpgdir(pgdir, (const void *)va, 0);
clearpages(vmap->pgdir, (void *) m->va_start, (void *) m->va_end);
pte = walkpgdir(vmap->pgdir, (const void *)va, 0);
*pte = PADDR(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
}
} else if (m->va_type == COW) {
......@@ -642,7 +647,7 @@ pagefault(pde_t *pgdir, struct vmap *vmap, uint va, uint err)
}
*pte = PADDR(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
}
lcr3(PADDR(pgdir)); // Reload hardware page tables
lcr3(PADDR(vmap->pgdir)); // Reload hardware page tables
release(&m->lock);
return 1;
}
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论