Ability to clear pml4s when using per-thread pml4s

Parent 4a3ed3ac
@@ -22,13 +22,14 @@ struct pgmap;
 struct proc_pgmap : public referenced {
   pgmap* const pml4;
+  vmap* const vmp;
 
-  static proc_pgmap* alloc();
+  static proc_pgmap* alloc(vmap* vmap);
   virtual void onzero() const { delete this; }
 
   proc_pgmap& operator=(const proc_pgmap&) = delete;
   proc_pgmap(const proc_pgmap& x) = delete;
 
 private:
-  proc_pgmap();
+  proc_pgmap(vmap* vmap);
   ~proc_pgmap();
 
   NEW_DELETE_OPS(proc_pgmap)
 };
...
@@ -122,6 +122,9 @@ struct vmap {
   int copyout(uptr va, void *p, u64 len);
   int sbrk(ssize_t n, uptr *addr);
 
+  void add_pgmap(proc_pgmap* pgmap);
+  void rem_pgmap(proc_pgmap* pgmap);
+
   uptr brk_;  // Top of heap
 
 private:
@@ -134,4 +137,11 @@ private:
   uptr unmapped_area(size_t n);
   struct spinlock brklock_;
+
+  // XXX(sbw) most likely an awful hash function
+  static u64 proc_pgmap_hash(proc_pgmap* const & p)
+  {
+    return (u64) p;
+  }
+  xns<proc_pgmap*, proc_pgmap*, proc_pgmap_hash> pgmap_list_;
 };
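pgmap_list_ is sv6's xns hash namespace used as a set: the proc_pgmap pointer is both key and value, and proc_pgmap_hash is pure pointer identity, which the XXX comment rightly flags as weak, since heap addresses are aligned and their low bits are always zero. A self-contained sketch of the same idea against the standard library rather than xns (the shift amount is an assumption about allocator alignment):

#include <cstddef>
#include <cstdint>
#include <unordered_set>

struct proc_pgmap;  // opaque in this sketch

// Pointer-identity hash in the spirit of proc_pgmap_hash above; dropping
// the low alignment bits spreads buckets a little better than the raw
// address the commit uses.
struct pgmap_hash {
  std::size_t operator()(proc_pgmap* const& p) const {
    return reinterpret_cast<std::uintptr_t>(p) >> 4;  // assumes 16-byte alignment
  }
};

using pgmap_set = std::unordered_set<proc_pgmap*, pgmap_hash>;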
@@ -181,7 +181,7 @@ exec(const char *path, char **argv, void *ascopev)
   if((vmp = vmap::alloc()) == 0)
     goto bad;
 
-  if ((pgmap = proc_pgmap::alloc()) == 0)
+  if ((pgmap = proc_pgmap::alloc(vmp)) == 0)
     goto bad;
 
   for(i=0, off=elf.phoff; i<elf.phnum; i++, off+=sizeof(ph)){
...
@@ -30,23 +30,28 @@ struct kstack_tag kstack_tag[NCPU];
 
 enum { sched_debug = 0 };
 
-proc_pgmap::proc_pgmap(void)
-  : pml4(setupkvm())
+proc_pgmap::proc_pgmap(vmap* vmap)
+  : pml4(setupkvm()), vmp(vmap)
 {
   if (pml4 == nullptr) {
     cprintf("proc_pgmap::proc_pgmap: setupkvm out of memory\n");
     throw_bad_alloc();
   }
+
+  vmp->ref++;
+  vmp->add_pgmap(this);
 }
 
 proc_pgmap*
-proc_pgmap::alloc(void)
+proc_pgmap::alloc(vmap* vmap)
 {
-  return new proc_pgmap();
+  return new proc_pgmap(vmap);
 }
 
 proc_pgmap::~proc_pgmap(void)
 {
+  vmp->rem_pgmap(this);
+  vmp->decref();
   freevm(pml4);
 }
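Together, the constructor and destructor give each proc_pgmap a clear ownership protocol over its vmap: construction pins the vmap and registers the pml4 for shootdowns; teardown deregisters before the page table is freed. A lifecycle sketch using the diff's API (the dec()/onzero() naming is an assumption extrapolated from the inc() call in fork below):

vmap* vmp = vmap::alloc();                // vmap starts at ref == 1
proc_pgmap* pg = proc_pgmap::alloc(vmp);  // ctor: vmp->ref++ pins the vmap,
                                          // vmp->add_pgmap(this) makes the
                                          // pml4 visible to insert()/remove()
// ... use pg->pml4 as the thread's page table ...
pg->dec();                                // last ref -> onzero() -> delete:
                                          // dtor deregisters, decrefs the
                                          // vmap, then freevm(pml4)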
@@ -456,7 +461,7 @@ fork(int flags)
     np->vmap = myproc()->vmap;
     np->vmap->ref++;
     if (flags & FORK_SEPARATE_PGMAP) {
-      np->pgmap = proc_pgmap::alloc();
+      np->pgmap = proc_pgmap::alloc(np->vmap);
     } else {
       np->pgmap = myproc()->pgmap;
       myproc()->pgmap->inc();
@@ -464,7 +469,7 @@ fork(int flags)
   } else {
     // Copy process state from p.
     np->vmap = myproc()->vmap->copy(cow, myproc()->pgmap);
-    np->pgmap = proc_pgmap::alloc();
+    np->pgmap = proc_pgmap::alloc(np->vmap);
   }
 
   np->parent = myproc();
...
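Both fork() hunks hand the child's vmap to the new pgmap, so every path out of fork() leaves the child's pml4 registered with the vmap it actually uses: the shared parent vmap under FORK_SEPARATE_PGMAP, the parent's existing pgmap when fully shared, or the COW copy on a plain fork. A hedged sketch of the per-thread case (only the flag name comes from the diff; the call convention is assumed):

// One vmap, two pml4s: after this, an munmap by either task must clear
// PTEs in both page tables, which is what the enumerate() loops below do.
int pid = fork(FORK_SEPARATE_PGMAP);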
@@ -29,7 +29,7 @@ inituser(void)
   bootproc = p;
   if((p->vmap = vmap::alloc()) == 0)
     panic("userinit: out of vmaps?");
-  if ((p->pgmap = proc_pgmap::alloc()) == 0)
+  if ((p->pgmap = proc_pgmap::alloc(p->vmap)) == 0)
     panic("inituser: alloc proc_pgmap");
   vmnode *vmn = new vmnode(PGROUNDUP(_initcode_size) / PGSIZE);
   if(vmn == 0)
...
@@ -197,6 +197,20 @@ to_stream(print_stream *s, vma *v)
 /*
  * vmap
  */
 
+void
+vmap::add_pgmap(proc_pgmap* pgmap)
+{
+  if (pgmap_list_.insert(pgmap, pgmap) < 0)
+    panic("vmap::add_pgmap");
+}
+
+void
+vmap::rem_pgmap(proc_pgmap* pgmap)
+{
+  if (!pgmap_list_.remove(pgmap, nullptr))
+    panic("vmap::rem_pgmap");
+}
+
 vmap*
 vmap::alloc(void)
 {
@@ -210,15 +224,14 @@ vmap::vmap() :
 #if VM_RADIX
   vmas(PGSHIFT),
 #endif
-  ref(1), kshared((char*) ksalloc(slab_kshared)), brk_(0)
+  ref(1), kshared((char*) ksalloc(slab_kshared)), brk_(0),
+  pgmap_list_(false)
 {
+  initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
   if (kshared == nullptr) {
     cprintf("vmap::vmap: kshared out of memory\n");
     goto err;
   }
-  initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
   return;
 
 err:
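Moving initlock() ahead of the kshared check looks like a small correctness tweak folded into the commit: brklock_ is now initialized even when construction bails out through err, so no teardown path can touch an uninitialized lock. The general pattern, as a sketch (names hypothetical; initlock/ksalloc used as above, with an assumed lockstat flag of 0):

struct example {
  struct spinlock lock_;
  char* buf_;
  example() {
    initlock(&lock_, "example_lock", 0);   // init before any early exit so
    buf_ = (char*) ksalloc(slab_kshared);  // cleanup never sees an
    if (buf_ == nullptr)                   // uninitialized lock
      throw_bad_alloc();
  }
};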
@@ -395,6 +408,7 @@ vmap::insert(vmnode *n, uptr vma_start, int dotlb, proc_pgmap* pgmap)
   vma *e;
   bool replaced = false;
   bool fixed = (vma_start != 0);
+  bool updateall = true;
 
 again:
   if (!fixed) {
@@ -453,25 +467,37 @@
 #if VM_RADIX
     span.replace(e->vma_start, e->vma_end-e->vma_start, e);
 #endif
+    // XXX(sbw) Replace should tell what cores to update
   }
 
   bool needtlb = false;
-  if (replaced)
-    updatepages(pgmap->pml4, e->vma_start, e->vma_end,
-                [&needtlb](atomic<pme_t> *p)
-    {
-      for (;;) {
-        pme_t v = p->load();
-        if (v & PTE_LOCK)
-          continue;
-        if (!(v & PTE_P))
-          break;
-        if (cmpxch(p, v, (pme_t) 0)) {
-          needtlb = true;
-          break;
-        }
-      }
-    });
+  auto update = [&needtlb, &updateall](atomic<pme_t> *p) {
+    for (;;) {
+      pme_t v = p->load();
+      if (v & PTE_LOCK)
+        continue;
+      if (!(v & PTE_P))
+        break;
+      if (cmpxch(p, v, (pme_t) 0)) {
+        needtlb = true && updateall;
+        break;
+      }
+    }
+  };
+
+  if (replaced) {
+    if (updateall)
+      pgmap_list_.enumerate([&](proc_pgmap* const &p,
+                                proc_pgmap* const &x)->bool
+      {
+        updatepages(p->pml4, e->vma_start, e->vma_end, update);
+        return false;
+      });
+    else
+      updatepages(pgmap->pml4, e->vma_start, e->vma_end, update);
+  }
 
   if (tlb_shootdown) {
     if (needtlb && dotlb)
       tlbflush();
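The hoisted update lambda is a lock-free PTE clear: spin while another core holds the entry's software lock bit, skip entries that are not present, otherwise compare-and-swap the entry to zero and record that a stale TLB entry may now exist. A standalone version with standard atomics (sv6's cmpxch wraps compare-exchange as used here; the PTE_LOCK bit position is an assumption, PTE_P is the x86 present bit):

#include <atomic>
#include <cstdint>

typedef uint64_t pme_t;
enum : pme_t {
  PTE_P    = 1ull << 0,  // present (x86-defined)
  PTE_LOCK = 1ull << 9,  // software lock (assumed; bits 9-11 are OS-available)
};

// Returns true if a live mapping was cleared, i.e. a TLB flush is owed.
static bool clear_pte(std::atomic<pme_t>* p) {
  for (;;) {
    pme_t v = p->load();
    if (v & PTE_LOCK)       // another core is updating this entry: retry
      continue;
    if (!(v & PTE_P))       // nothing mapped: nothing to flush
      return false;
    if (p->compare_exchange_weak(v, (pme_t) 0))
      return true;          // cleared; cached translations are now stale
  }
}

Note that as committed updateall is always true, so needtlb = true && updateall is just needtlb = true; the guard anticipates a later mode where updating only the caller's pgmap skips the cross-core flush.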
@@ -486,6 +512,7 @@ again:
 int
 vmap::remove(uptr vma_start, uptr len, proc_pgmap* pgmap)
 {
+  bool updateall = true;
   {
     // new scope to release the search lock before tlbflush
@@ -512,22 +539,35 @@ vmap::remove(uptr vma_start, uptr len, proc_pgmap* pgmap)
       // could skip the updatepages.
       span.replace(vma_start, len, 0);
 #endif
+      // XXX(sbw) Replace should tell what cores to update
     }
 
     bool needtlb = false;
-    updatepages(pgmap->pml4, vma_start, vma_start + len, [&needtlb](atomic<pme_t> *p) {
-        for (;;) {
-          pme_t v = p->load();
-          if (v & PTE_LOCK)
-            continue;
-          if (!(v & PTE_P))
-            break;
-          if (cmpxch(p, v, (pme_t) 0)) {
-            needtlb = true;
-            break;
-          }
-        }
-      });
+    auto update = [&needtlb, &updateall](atomic<pme_t> *p) {
+      for (;;) {
+        pme_t v = p->load();
+        if (v & PTE_LOCK)
+          continue;
+        if (!(v & PTE_P))
+          break;
+        if (cmpxch(p, v, (pme_t) 0)) {
+          needtlb = true && updateall;
+          break;
+        }
+      }
+    };
+
+    if (updateall)
+      pgmap_list_.enumerate([&](proc_pgmap* const &p,
+                                proc_pgmap* const &x)->bool
+      {
+        updatepages(p->pml4, vma_start, vma_start + len, update);
+        return false;
+      });
+    else
+      updatepages(pgmap->pml4, vma_start, vma_start + len, update);
 
     if (tlb_shootdown && needtlb) {
       if (tlb_lazy) {
         myproc()->unmap_tlbreq_ = tlbflush_req++;
...
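insert() and remove() now share the same broadcast step: enumerate every pml4 registered with the vmap and run the clearing function over the affected range, with the single-pgmap fallback kept for a future updateall == false mode. Condensed, using the diff's own API (that returning false continues the enumeration is inferred from these call sites):

// [start, end) stands in for the vma bounds or the unmap range:
if (updateall)
  pgmap_list_.enumerate([&](proc_pgmap* const& p, proc_pgmap* const&) -> bool {
    updatepages(p->pml4, start, end, update);  // clear range in this pml4
    return false;                              // keep enumerating
  });
else
  updatepages(pgmap->pml4, start, end, update);

This ordering is also why ~proc_pgmap() calls rem_pgmap() before freevm(): a pml4 left registered after it was freed could still be walked by a concurrent insert() or remove().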