Ability to clear pml4s when using per-thread pml4s

Parent 4a3ed3ac
@@ -22,13 +22,14 @@ struct pgmap;
 struct proc_pgmap : public referenced {
   pgmap* const pml4;
+  vmap* const vmp;
-  static proc_pgmap* alloc();
+  static proc_pgmap* alloc(vmap* vmap);
   virtual void onzero() const { delete this; }
   proc_pgmap& operator=(const proc_pgmap&) = delete;
   proc_pgmap(const proc_pgmap& x) = delete;
 private:
-  proc_pgmap();
+  proc_pgmap(vmap* vmap);
   ~proc_pgmap();
   NEW_DELETE_OPS(proc_pgmap)
 };
...
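
proc_pgmap now carries a `vmap* const vmp` back-pointer and its factory takes the owning vmap. The object itself is still reference-counted through the `referenced` base, whose `onzero()` hook destroys the pgmap once the last holder drops it. A minimal, self-contained sketch of that delete-on-zero idiom, using a hypothetical `refcounted`/`pgmap_like` pair and std::atomic in place of sv6's `referenced` base:

#include <atomic>
#include <cstdio>

// Hypothetical stand-in for sv6's "referenced" base: delete-on-zero refcounting.
struct refcounted {
  std::atomic<int> ref{1};
  void inc() { ref.fetch_add(1, std::memory_order_relaxed); }
  void dec() {
    if (ref.fetch_sub(1, std::memory_order_acq_rel) == 1)
      onzero();                       // last reference gone
  }
  virtual void onzero() { delete this; }
  virtual ~refcounted() = default;
};

struct pgmap_like : refcounted {
  void onzero() override {
    std::puts("freeing per-thread page table");
    delete this;
  }
};

int main() {
  pgmap_like* pg = new pgmap_like;    // ref == 1 (creator)
  pg->inc();                          // a second thread shares the pgmap
  pg->dec();                          // sharer exits
  pg->dec();                          // creator exits -> onzero() -> delete
}
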
@@ -122,6 +122,9 @@ struct vmap {
   int copyout(uptr va, void *p, u64 len);
   int sbrk(ssize_t n, uptr *addr);
+  void add_pgmap(proc_pgmap* pgmap);
+  void rem_pgmap(proc_pgmap* pgmap);
   uptr brk_;                 // Top of heap
 private:
...
@@ -134,4 +137,11 @@ private:
   uptr unmapped_area(size_t n);
   struct spinlock brklock_;
+  // XXX(sbw) most likely an awful hash function
+  static u64 proc_pgmap_hash(proc_pgmap* const & p)
+  {
+    return (u64) p;
+  }
+  xns<proc_pgmap*, proc_pgmap*, proc_pgmap_hash> pgmap_list_;
 };
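
The tracking structure is an `xns` hash set of every proc_pgmap that shares this vmap, keyed by the pgmap pointer itself. As the XXX note admits, casting the pointer to u64 is a weak hash: heap pointers share alignment and slab bits, so many keys can land in few buckets. A sketch of a stronger pointer hash is below; the name `ptr_hash` and the choice of a splitmix64-style finalizer are mine, not part of the commit:

#include <cstdint>

// Hypothetical alternative to proc_pgmap_hash: mix the pointer bits so that
// aligned allocations do not all collide in the low-order buckets.
// (splitmix64 finalizer; any good 64-bit mixer would do.)
static uint64_t ptr_hash(void* const& p)
{
  uint64_t x = reinterpret_cast<uint64_t>(p);
  x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
  x ^= x >> 27; x *= 0x94d049bb133111ebULL;
  x ^= x >> 31;
  return x;
}
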
@@ -181,7 +181,7 @@ exec(const char *path, char **argv, void *ascopev)
   if((vmp = vmap::alloc()) == 0)
     goto bad;
-  if ((pgmap = proc_pgmap::alloc()) == 0)
+  if ((pgmap = proc_pgmap::alloc(vmp)) == 0)
     goto bad;
   for(i=0, off=elf.phoff; i<elf.phnum; i++, off+=sizeof(ph)){
...
@@ -30,23 +30,28 @@ struct kstack_tag kstack_tag[NCPU];
 enum { sched_debug = 0 };
-proc_pgmap::proc_pgmap(void)
-  : pml4(setupkvm())
+proc_pgmap::proc_pgmap(vmap* vmap)
+  : pml4(setupkvm()), vmp(vmap)
 {
   if (pml4 == nullptr) {
     cprintf("proc_pgmap::proc_pgmap: setupkvm out of memory\n");
     throw_bad_alloc();
   }
+  vmp->ref++;
+  vmp->add_pgmap(this);
 }
 proc_pgmap*
-proc_pgmap::alloc(void)
+proc_pgmap::alloc(vmap* vmap)
 {
-  return new proc_pgmap();
+  return new proc_pgmap(vmap);
 }
 proc_pgmap::~proc_pgmap(void)
 {
+  vmp->rem_pgmap(this);
+  vmp->decref();
   freevm(pml4);
 }
@@ -456,7 +461,7 @@ fork(int flags)
     np->vmap = myproc()->vmap;
     np->vmap->ref++;
     if (flags & FORK_SEPARATE_PGMAP) {
-      np->pgmap = proc_pgmap::alloc();
+      np->pgmap = proc_pgmap::alloc(np->vmap);
     } else {
       np->pgmap = myproc()->pgmap;
       myproc()->pgmap->inc();
@@ -464,7 +469,7 @@ fork(int flags)
   } else {
     // Copy process state from p.
     np->vmap = myproc()->vmap->copy(cow, myproc()->pgmap);
-    np->pgmap = proc_pgmap::alloc();
+    np->pgmap = proc_pgmap::alloc(np->vmap);
   }
   np->parent = myproc();
...
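
The constructor and destructor now tie a pgmap to its address space: on construction the pgmap takes a reference on the vmap and registers itself in the vmap's pgmap_list_, and on destruction it deregisters before dropping that reference, so the vmap can always reach every hardware page table that mirrors it. A minimal sketch of this register-in-ctor / deregister-in-dtor pattern, with hypothetical names (`addr_space`, `page_table`) and std::unordered_set standing in for sv6's vmap and xns:

#include <cassert>
#include <unordered_set>

struct page_table;                           // forward declaration

// Hypothetical stand-in for vmap: it only tracks which page tables mirror it.
struct addr_space {
  std::unordered_set<page_table*> tables;    // stand-in for pgmap_list_ (xns)
  void add(page_table* pt)    { assert(tables.insert(pt).second); }
  void remove(page_table* pt) { assert(tables.erase(pt) == 1); }
};

// Hypothetical stand-in for proc_pgmap: registered for exactly its lifetime.
struct page_table {
  addr_space* const as;
  explicit page_table(addr_space* a) : as(a) { as->add(this); }
  ~page_table() { as->remove(this); }        // deregister before dropping the vmap
};

int main() {
  addr_space as;
  {
    page_table t1(&as), t2(&as);             // two threads, two private pml4s
    assert(as.tables.size() == 2);           // the address space sees both tables
  }
  assert(as.tables.empty());                 // both deregistered on destruction
}

In fork(), both paths that allocate a fresh pgmap now pass np->vmap, so a thread created with FORK_SEPARATE_PGMAP shares the address space but gets a private pml4 that the vmap still knows about.
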
@@ -29,7 +29,7 @@ inituser(void)
   bootproc = p;
   if((p->vmap = vmap::alloc()) == 0)
     panic("userinit: out of vmaps?");
-  if ((p->pgmap = proc_pgmap::alloc()) == 0)
+  if ((p->pgmap = proc_pgmap::alloc(p->vmap)) == 0)
     panic("inituser: alloc proc_pgmap");
   vmnode *vmn = new vmnode(PGROUNDUP(_initcode_size) / PGSIZE);
   if(vmn == 0)
...
@@ -197,6 +197,20 @@ to_stream(print_stream *s, vma *v)
  * vmap
  */
+void
+vmap::add_pgmap(proc_pgmap* pgmap)
+{
+  if (pgmap_list_.insert(pgmap, pgmap) < 0)
+    panic("vmap::add_pgmap");
+}
+
+void
+vmap::rem_pgmap(proc_pgmap* pgmap)
+{
+  if (!pgmap_list_.remove(pgmap, nullptr))
+    panic("vmap::rem_pgmap");
+}
+
 vmap*
 vmap::alloc(void)
 {
@@ -210,15 +224,14 @@ vmap::vmap() :
 #if VM_RADIX
   vmas(PGSHIFT),
 #endif
-  ref(1), kshared((char*) ksalloc(slab_kshared)), brk_(0)
+  ref(1), kshared((char*) ksalloc(slab_kshared)), brk_(0),
+  pgmap_list_(false)
 {
-  initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
   if (kshared == nullptr) {
     cprintf("vmap::vmap: kshared out of memory\n");
     goto err;
   }
+  initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
   return;
 err:
@@ -395,6 +408,7 @@ vmap::insert(vmnode *n, uptr vma_start, int dotlb, proc_pgmap* pgmap)
   vma *e;
   bool replaced = false;
   bool fixed = (vma_start != 0);
+  bool updateall = true;
 again:
   if (!fixed) {
@@ -453,25 +467,37 @@ again:
 #if VM_RADIX
     span.replace(e->vma_start, e->vma_end-e->vma_start, e);
 #endif
+    // XXX(sbw) Replace should tell what cores to update
   }
   bool needtlb = false;
-  if (replaced)
-    updatepages(pgmap->pml4, e->vma_start, e->vma_end,
-                [&needtlb](atomic<pme_t> *p)
-      {
-        for (;;) {
-          pme_t v = p->load();
-          if (v & PTE_LOCK)
-            continue;
-          if (!(v & PTE_P))
-            break;
-          if (cmpxch(p, v, (pme_t) 0)) {
-            needtlb = true;
-            break;
-          }
-        }
-      });
+  auto update = [&needtlb, &updateall](atomic<pme_t> *p) {
+    for (;;) {
+      pme_t v = p->load();
+      if (v & PTE_LOCK)
+        continue;
+      if (!(v & PTE_P))
+        break;
+      if (cmpxch(p, v, (pme_t) 0)) {
+        needtlb = true && updateall;
+        break;
+      }
+    }
+  };
+
+  if (replaced) {
+    if (updateall)
+      pgmap_list_.enumerate([&](proc_pgmap* const &p,
+                                proc_pgmap* const &x)->bool
+      {
+        updatepages(p->pml4, e->vma_start, e->vma_end, update);
+        return false;
+      });
+    else
+      updatepages(pgmap->pml4, e->vma_start, e->vma_end, update);
+  }
   if (tlb_shootdown) {
     if (needtlb && dotlb)
       tlbflush();
@@ -486,6 +512,7 @@ again:
 int
 vmap::remove(uptr vma_start, uptr len, proc_pgmap* pgmap)
 {
+  bool updateall = true;
   {
     // new scope to release the search lock before tlbflush
@@ -512,22 +539,35 @@ vmap::remove(uptr vma_start, uptr len, proc_pgmap* pgmap)
     // could skip the updatepages.
     span.replace(vma_start, len, 0);
 #endif
+    // XXX(sbw) Replace should tell what cores to update
   }
   bool needtlb = false;
-  updatepages(pgmap->pml4, vma_start, vma_start + len, [&needtlb](atomic<pme_t> *p) {
-      for (;;) {
-        pme_t v = p->load();
-        if (v & PTE_LOCK)
-          continue;
-        if (!(v & PTE_P))
-          break;
-        if (cmpxch(p, v, (pme_t) 0)) {
-          needtlb = true;
-          break;
-        }
-      }
-    });
+  auto update = [&needtlb, &updateall](atomic<pme_t> *p) {
+    for (;;) {
+      pme_t v = p->load();
+      if (v & PTE_LOCK)
+        continue;
+      if (!(v & PTE_P))
+        break;
+      if (cmpxch(p, v, (pme_t) 0)) {
+        needtlb = true && updateall;
+        break;
+      }
+    }
+  };
+
+  if (updateall)
+    pgmap_list_.enumerate([&](proc_pgmap* const &p,
+                              proc_pgmap* const &x)->bool
+    {
+      updatepages(p->pml4, vma_start, vma_start + len, update);
+      return false;
+    });
+  else
+    updatepages(pgmap->pml4, vma_start, vma_start + len, update);
  if (tlb_shootdown && needtlb) {
    if (tlb_lazy) {
      myproc()->unmap_tlbreq_ = tlbflush_req++;
...
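
This is the heart of the commit: instead of clearing mappings only in the caller's pml4, insert() and remove() now enumerate pgmap_list_ and zap the affected PTEs in every page table registered against the vmap, and request a TLB flush only if some present entry was actually cleared. A self-contained sketch of that pattern, with std::atomic<uint64_t> entries and a std::vector of toy page tables standing in for sv6's pml4s and xns (PTE_P, clear_range, and page_table here are illustrative names, not the kernel's):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint64_t PTE_P = 1;                    // "present" bit, as in x86 PTEs

// Toy page table: one flat array of atomic entries stands in for a pml4 walk.
struct page_table {
  std::atomic<uint64_t> pte[16];
};

// Clear every present entry in [start, end) of one table; report whether any was present.
static bool clear_range(page_table& pt, size_t start, size_t end)
{
  bool cleared = false;
  for (size_t i = start; i < end; i++) {
    uint64_t v = pt.pte[i].load();
    while (v & PTE_P) {                          // retry CAS, like sv6's cmpxch loop
      if (pt.pte[i].compare_exchange_weak(v, 0)) {
        cleared = true;
        break;
      }
    }
  }
  return cleared;
}

int main() {
  // Two page tables mirroring one address space (cf. pgmap_list_).
  std::vector<page_table> tables(2);
  tables[0].pte[3] = PTE_P | 0x1000;             // same VA mapped in both tables
  tables[1].pte[3] = PTE_P | 0x1000;

  bool needtlb = false;
  for (auto& pt : tables)                        // "updateall" path: hit every table
    needtlb |= clear_range(pt, 0, 16);

  if (needtlb)
    std::puts("would tlbflush() here");          // shoot down stale translations
}

The updateall flag is hard-wired to true in both functions, so the per-pml4 fast path (update only the caller's pgmap and skip the shootdown) is present but not yet taken.
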