Commit ca7c3fca authored by Nickolai Zeldovich

some more reorg

Parent eabc1d9c
@@ -61,10 +61,10 @@ dosegment(uptr a0, u64 a1)
   int npg = (va_end - va_start) / PGSIZE;
   if (odp) {
-    if ((vmn = vmn_alloc(npg, ONDEMAND)) == 0)
+    if ((vmn = new vmnode(npg, ONDEMAND)) == 0)
      goto bad;
   } else {
-    if ((vmn = vmn_allocpg(npg)) == 0)
+    if ((vmn = new vmnode(npg)) == 0)
      goto bad;
   }
@@ -93,7 +93,7 @@ static void dostack(uptr a0, u64 a1)
   prof_start(dostack_prof);
   // Allocate a one-page stack at the top of the (user) address space
-  if((vmn = vmn_allocpg(USTACKPAGES)) == 0)
+  if((vmn = new vmnode(USTACKPAGES)) == 0)
     goto bad;
   if(args->vmap->insert(vmn, USERTOP-(USTACKPAGES*PGSIZE)) < 0)
     goto bad;
@@ -106,7 +106,7 @@ static void dostack(uptr a0, u64 a1)
       goto bad;
     sp -= strlen(args->argv[argc]) + 1;
     sp &= ~7;
-    if(copyout(args->vmap, sp, args->argv[argc], strlen(args->argv[argc]) + 1) < 0)
+    if(args->vmap->copyout(sp, args->argv[argc], strlen(args->argv[argc]) + 1) < 0)
       goto bad;
     ustack[1+argc] = sp;
   }
@@ -118,7 +118,7 @@ static void dostack(uptr a0, u64 a1)
   args->proc->tf->rsi = sp - (argc+1)*8;
   sp -= (1+argc+1) * 8;
-  if(copyout(args->vmap, sp, ustack, (1+argc+1)*8) < 0)
+  if(args->vmap->copyout(sp, ustack, (1+argc+1)*8) < 0)
     goto bad;
   // Save program name for debugging.
@@ -144,7 +144,7 @@ static void doheap(uptr a0, u64 a1)
   prof_start(doheap_prof);
   // Allocate a vmnode for the heap.
   // XXX pre-allocate 32 pages..
-  if((vmn = vmn_allocpg(32)) == 0)
+  if((vmn = new vmnode(32)) == 0)
     goto bad;
   if(args->vmap->insert(vmn, BRK) < 0)
     goto bad;
@@ -161,7 +161,7 @@ int
 exec(char *path, char **argv)
 {
   struct inode *ip = NULL;
-  struct vmap *vmap = NULL;
+  struct vmap *vmp = NULL;
   struct vmnode *vmn = NULL;
   struct elfhdr elf;
   struct proghdr ph;
@@ -183,14 +183,14 @@ exec(char *path, char **argv)
   if(elf.magic != ELF_MAGIC)
     goto bad;
-  if((vmap = vmap_alloc()) == 0)
+  if((vmp = new vmap()) == 0)
     goto bad;

   // Arguments for work queue
   struct eargs args;
   args.proc = myproc();
   args.ip = ip;
-  args.vmap = vmap;
+  args.vmap = vmp;
   args.path = path;
   args.argv = argv;
@@ -225,7 +225,7 @@ exec(char *path, char **argv)
   // Commit to the user image.
   oldvmap = myproc()->vmap;
-  myproc()->vmap = vmap;
+  myproc()->vmap = vmp;
   myproc()->brk = BRK + 8;  // XXX so that brk-1 points within heap vma..
   myproc()->tf->rip = elf.entry;  // main
@@ -238,8 +238,8 @@ exec(char *path, char **argv)
 bad:
   cprintf("exec failed\n");
-  if(vmap)
-    vmap->decref();
+  if(vmp)
+    vmp->decref();
   if(vmn)
     delete vmn;
   gc_end_epoch();
...
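The exec path above now builds its memory objects with new vmnode(...) instead of the old vmn_alloc/vmn_allocpg helpers, choosing ONDEMAND for segments that can be demand-paged and the EAGER default otherwise. A minimal sketch of that choice, assuming the vmnode constructor and vmnode::load() defined in the vm.cc hunks further down; segment_node and its parameters are illustrative names, not code from the tree:

// Sketch only: how a dosegment-style caller picks between the two vmnode
// modes after this change.  Assumes vmnode(npg, type = EAGER) and
// vmnode::load() from vm.cc below; segment_node, ip, off and filesz are
// illustrative, not code from the tree.
static vmnode *
segment_node(bool odp, u64 npg, inode *ip, u64 off, u64 filesz)
{
  vmnode *vmn;
  if (odp) {
    // ONDEMAND: no pages are allocated yet; load() only records
    // ip/offset/sz and demand_load() reads the data at fault time.
    if ((vmn = new vmnode(npg, ONDEMAND)) == 0)
      return 0;
  } else {
    // EAGER (the default): the constructor allocates the pages up front
    // and load() reads the file contents immediately.
    if ((vmn = new vmnode(npg)) == 0)
      return 0;
  }
  if (vmn->load(ip, off, filesz) < 0) {
    delete vmn;
    return 0;
  }
  return vmn;
}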
@@ -18,3 +18,13 @@ class rcu_freed {
   virtual void do_gc(void) = 0;
 } __mpalign__;
+
+void gc_begin_epoch();
+void gc_end_epoch();
+
+class scoped_gc_epoch {
+ public:
+  scoped_gc_epoch() { gc_begin_epoch(); }
+  ~scoped_gc_epoch() { gc_end_epoch(); }
+};
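The scoped_gc_epoch helper added here is what lets the vm.cc hunks further down drop their explicit gc_begin_epoch()/gc_end_epoch() pairs: the destructor closes the epoch on every exit path. A minimal before/after sketch of the pattern; lookup_old and lookup_new are illustrative names, not functions in the tree:

// Before: every early return must remember to end the epoch.
int
lookup_old(vmap *vm, uptr va)
{
  gc_begin_epoch();
  vma *m = vm->lookup(va, 1);
  if (m == 0) {
    gc_end_epoch();
    return -1;
  }
  // ... use m ...
  gc_end_epoch();
  return 0;
}

// After: the scoped_gc_epoch destructor ends the epoch automatically.
int
lookup_new(vmap *vm, uptr va)
{
  scoped_gc_epoch gc;
  vma *m = vm->lookup(va, 1);
  if (m == 0)
    return -1;
  // ... use m ...
  return 0;
}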
@@ -97,8 +97,6 @@ void dir_flush(struct inode *dp);
 // gc.c
 void initgc(void);
 void initprocgc(struct proc *);
-void gc_begin_epoch();
-void gc_end_epoch();
 void gc_start(void);

 #ifdef __cplusplus
@@ -249,12 +247,6 @@ void uartputc(char c);
 void uartintr(void);

 // vm.c
-enum vmntype { EAGER, ONDEMAND };
-struct vmap * vmap_alloc(void);
-struct vmnode* vmn_alloc(u64, enum vmntype);
-struct vmnode* vmn_allocpg(u64);
-int copyout(struct vmap *, uptr, void*, u64);
 void switchuvm(struct proc*);
 void switchkvm(void);
 int pagefault(struct vmap *, uptr, u32);
...
@@ -14,12 +14,6 @@ using std::atomic;
 #define NHASH 30
 #endif

-class scoped_gc_epoch {
- public:
-  scoped_gc_epoch() { gc_begin_epoch(); }
-  ~scoped_gc_epoch() { gc_end_epoch(); }
-};
-
 template<class K, class V>
 class xelem : public rcu_freed {
  public:
...
@@ -274,15 +274,14 @@ inituser(void)
   p = allocproc();
   bootproc = p;
-  if((p->vmap = vmap_alloc()) == 0)
+  if((p->vmap = new vmap()) == 0)
     panic("userinit: out of vmaps?");
-  struct vmnode *vmn =
-    vmn_allocpg(PGROUNDUP(_initcode_size) / PGSIZE);
+  vmnode *vmn = new vmnode(PGROUNDUP(_initcode_size) / PGSIZE);
   if(vmn == 0)
     panic("userinit: vmn_allocpg");
   if(p->vmap->insert(vmn, 0) < 0)
     panic("userinit: vmap_insert");
-  if(copyout(p->vmap, 0, _initcode_start, _initcode_size) < 0)
+  if(p->vmap->copyout(0, _initcode_start, _initcode_size) < 0)
     panic("userinit: copyout");

   memset(p->tf, 0, sizeof(*p->tf));
   p->tf->cs = UCSEG | 0x3;
@@ -464,7 +463,7 @@ growproc(int n)
     return -1;
   }

-  struct vmnode *vmn = vmn_allocpg(PGROUNDUP(newn) / PGSIZE);
+  vmnode *vmn = new vmnode(PGROUNDUP(newn) / PGSIZE);
   if(vmn == 0){
     release(&m->lock);
     cprintf("growproc: vmn_allocpg failed\n");
@@ -680,7 +679,7 @@ threadalloc(void (*fn)(void *), void *arg)
   if (p == NULL)
     return 0;

-  p->vmap = vmap_alloc();
+  p->vmap = new vmap();
   if (p->vmap == NULL) {
     freeproc(p);
     return 0;
...
@@ -110,7 +110,7 @@ sys_map(void)
   if (argint64(1, &len) < 0)
     return -1;

-  struct vmnode *vmn = vmn_allocpg(PGROUNDUP(len) / PGSIZE);
+  vmnode *vmn = new vmnode(PGROUNDUP(len) / PGSIZE);
   if (vmn == 0)
     return -1;
...
@@ -19,6 +19,34 @@ extern "C" {

 enum { vm_debug = 0 };

+/*
+ * vmnode
+ */
+
+vmnode::vmnode(u64 npg, vmntype ntype)
+  : npages(npg), ref(0), type(ntype), ip(0), offset(0), sz(0)
+{
+  if (npg > NELEM(page))
+    panic("vmnode too big\n");
+  memset(page, 0, sizeof(page));
+  if (type == EAGER)
+    assert(allocpg() == 0);
+}
+
+vmnode::~vmnode()
+{
+  for(u64 i = 0; i < npages; i++) {
+    if (page[i]) {
+      kfree(page[i]);
+      page[i] = 0;
+    }
+  }
+  if (ip) {
+    iput(ip);
+    ip = 0;
+  }
+}
+
 void
 vmnode::decref()
 {
@@ -26,13 +54,6 @@ vmnode::decref()
     delete this;
 }

-vma::~vma()
-{
-  if(n)
-    n->decref();
-  destroylock(&lock);
-}
-
 int
 vmnode::allocpg()
 {
@@ -44,50 +65,74 @@ vmnode::allocpg()
   return 0;
 }

-static struct vmnode *
-vmn_copy(struct vmnode *n)
+vmnode *
+vmnode::copy()
 {
-  struct vmnode *c = vmn_alloc(n->npages, n->type);
+  vmnode *c = new vmnode(npages, type);
   if(c != 0) {
-    c->type = n->type;
-    if (n->type == ONDEMAND) {
-      c->ip = idup(n->ip);
-      c->offset = n->offset;
+    c->type = type;
+    if (type == ONDEMAND) {
+      c->ip = idup(ip);
+      c->offset = offset;
       c->sz = c->sz;
     }
-    if (n->page[0]) { // If the first page is present, all of them are present
+    if (page[0]) { // If the first page is present, all of them are present
       if (c->allocpg() < 0) {
         cprintf("vmn_copy: out of memory\n");
         delete c;
         return 0;
       }
-      for(u64 i = 0; i < n->npages; i++) {
-        memmove(c->page[i], n->page[i], PGSIZE);
+      for(u64 i = 0; i < npages; i++) {
+        memmove(c->page[i], page[i], PGSIZE);
       }
     }
   }
   return c;
 }

-struct vmnode *
-vmn_allocpg(u64 npg)
+int
+vmnode::demand_load()
 {
-  struct vmnode *n = vmn_alloc(npg, EAGER);
-  if (n == 0) return 0;
-  if (n->allocpg() < 0) {
-    delete n;
-    return 0;
+  for (u64 i = 0; i < sz; i += PGSIZE) {
+    char *p = page[i / PGSIZE];
+    s64 n;
+    if (sz - i < PGSIZE)
+      n = sz - i;
+    else
+      n = PGSIZE;
+    if (readi(ip, p, offset+i, n) != n)
+      return -1;
   }
-  return n;
+  return 0;
 }

-void
-vmap::decref()
+int
+vmnode::load(inode *iparg, u64 offarg, u64 szarg)
 {
-  if (--ref == 0)
-    delete this;
+  ip = iparg;
+  offset = offarg;
+  sz = szarg;
+  if (type == ONDEMAND)
+    return 0;
+  return demand_load();
 }

+/*
+ * vma
+ */
+
+vma::~vma()
+{
+  if(n)
+    n->decref();
+  destroylock(&lock);
+}
+
+/*
+ * vmap
+ */
+
 vmap::vmap()
   : cr(10)
 {
@@ -125,63 +170,165 @@ vmap::vmap()
   destroylock(&lock);
 }

+vmap::~vmap()
+{
+  for (range *r: cr) {
+    delete (vma*) r->value;
+    cr.del(r->key, r->size);
+  }
+  if (kshared)
+    ksfree(slab_kshared, kshared);
+  if (pml4)
+    freevm(pml4);
+  alloc = 0;
+  destroylock(&lock);
+}
+
+void
+vmap::decref()
+{
+  if (--ref == 0)
+    delete this;
+}
+
 vmap*
-vmap_alloc()
+vmap::copy(int share)
 {
-  return new vmap();
+  vmap *nm = new vmap();
+  if(nm == 0)
+    return 0;
+
+  scoped_acquire sa(&lock);
+  for (range *r: cr) {
+    struct vma *e = (struct vma *) r->value;
+    struct vma *ne = new vma();
+    if (ne == 0)
+      goto err;
+
+    ne->va_start = e->va_start;
+    ne->va_end = e->va_end;
+    if (share) {
+      ne->n = e->n;
+      ne->va_type = COW;
+
+      scoped_acquire sae(&e->lock);
+      e->va_type = COW;
+      updatepages(pml4, (void *) (e->va_start), (void *) (e->va_end), PTE_COW);
+    } else {
+      ne->n = e->n->copy();
+      ne->va_type = e->va_type;
+    }
+
+    if (ne->n == 0)
+      goto err;
+
+    ne->n->ref++;
+    nm->cr.add(ne->va_start, ne->va_end - ne->va_start, (void *) ne);
+  }
+
+  if (share)
+    lcr3(v2p(pml4));  // Reload hardware page table
+
+  return nm;
+
+ err:
+  delete nm;
+  return 0;
 }

-int
-vmnode::demand_load()
+// Does any vma overlap start..start+len?
+// If yes, return the vma pointer.
+// If no, return 0.
+// This code can't handle regions at the very end
+// of the address space, e.g. 0xffffffff..0x0
+// We key vma's by their end address.
+vma *
+vmap::lookup(uptr start, uptr len)
 {
-  for (u64 i = 0; i < sz; i += PGSIZE) {
-    char *p = page[i / PGSIZE];
-    s64 n;
-    if (sz - i < PGSIZE)
-      n = sz - i;
-    else
-      n = PGSIZE;
-    if (readi(ip, p, offset+i, n) != n)
-      return -1;
+  if (start + len < start)
+    panic("vmap::lookup bad len");
+
+  range *r = cr.search(start, len);
+  if (r != 0) {
+    vma *e = (struct vma *) (r->value);
+    if (e->va_end <= e->va_start)
+      panic("malformed va");
+    if (e->va_start < start+len && e->va_end > start)
+      return e;
   }
   return 0;
 }

 int
-vmnode::load(inode *iparg, u64 offarg, u64 szarg)
+vmap::insert(vmnode *n, uptr va_start)
 {
-  ip = iparg;
-  offset = offarg;
-  sz = szarg;
-  if (type == ONDEMAND)
-    return 0;
-  return demand_load();
+  scoped_acquire sa(&lock);
+  u64 len = n->npages * PGSIZE;
+
+  if (lookup(va_start, len)) {
+    cprintf("vmap_insert: overlap\n");
+    return -1;
+  }
+
+  vma *e = new vma();
+  if (e == 0)
+    return -1;
+  e->va_start = va_start;
+  e->va_end = va_start + len;
+  e->n = n;
+  n->ref++;
+  cr.add(e->va_start, len, (void *) e);
+  return 0;
+}
+
+int
+vmap::remove(uptr va_start, uptr len)
+{
+  scoped_acquire sa(&lock);
+  uptr va_end = va_start + len;
+  struct range *r = cr.search(va_start, len);
+  if (r == 0)
+    panic("no vma?");
+
+  struct vma *e = (struct vma *) r->value;
+  if (e->va_start != va_start || e->va_end != va_end) {
+    cprintf("vmap_remove: partial unmap unsupported\n");
+    return -1;
+  }
+
+  cr.del(va_start, len);
+  gc_delayed(e);
+  return 0;
 }

-static struct vma *
-pagefault_ondemand(struct vmap *vmap, uptr va, u32 err, struct vma *m)
+/*
+ * pagefault handling code on vmap
+ */
+
+vma *
+vmap::pagefault_ondemand(uptr va, u32 err, vma *m)
 {
   if (m->n->allocpg() < 0)
     panic("pagefault: couldn't allocate pages");
   release(&m->lock);
   if (m->n->demand_load() < 0)
     panic("pagefault: couldn't load");
-  m = vmap->lookup(va, 1);
+  m = lookup(va, 1);
   if (!m)
     panic("pagefault_ondemand");
   acquire(&m->lock); // re-acquire lock on m
   return m;
 }

-static int
-pagefault_wcow(struct vmap *vmap, uptr va, pme_t *pte, struct vma *m, u64 npg)
+int
+vmap::pagefault_wcow(uptr va, pme_t *pte, vma *m, u64 npg)
 {
   // Always make a copy of n, even if this process has the only ref,
   // because other processes may change ref count while this process
   // is handling wcow.
   struct vmnode *n = m->n;
-  struct vmnode *c = vmn_copy(m->n);
+  struct vmnode *c = m->n->copy();
   if (c == 0) {
     cprintf("pagefault_wcow: out of mem\n");
     return -1;
@@ -190,8 +337,8 @@ pagefault_wcow(struct vmap *vmap, uptr va, pme_t *pte, struct vma *m, u64 npg)
   m->va_type = PRIVATE;
   m->n = c;
   // Update the hardware page tables to reflect the change to the vma
-  updatepages(vmap->pml4, (void *) m->va_start, (void *) m->va_end, 0);
-  pte = walkpgdir(vmap->pml4, (const void *)va, 0);
+  updatepages(pml4, (void *) m->va_start, (void *) m->va_end, 0);
+  pte = walkpgdir(pml4, (const void *)va, 0);
   *pte = v2p(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
   // drop my ref to vmnode
   n->decref();
@@ -199,35 +346,32 @@ pagefault_wcow(struct vmap *vmap, uptr va, pme_t *pte, struct vma *m, u64 npg)
 }

 int
-pagefault(struct vmap *vmap, uptr va, u32 err)
+vmap::pagefault(uptr va, u32 err)
 {
-  pme_t *pte = walkpgdir(vmap->pml4, (const void *)va, 1);
+  pme_t *pte = walkpgdir(pml4, (const void *)va, 1);

   // optimize checks of args to syscals
   if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W))
     return 0;

-  gc_begin_epoch();
-  vma *m = vmap->lookup(va, 1);
-  if (m == 0) {
-    gc_end_epoch();
+  scoped_gc_epoch gc;
+  vma *m = lookup(va, 1);
+  if (m == 0)
     return -1;
-  }

   acquire(&m->lock);
   u64 npg = (PGROUNDDOWN(va) - m->va_start) / PGSIZE;
   if (m->n && m->n->type == ONDEMAND && m->n->page[npg] == 0)
-    m = pagefault_ondemand(vmap, va, err, m);
+    m = pagefault_ondemand(va, err, m);

   if (vm_debug)
     cprintf("pagefault: err 0x%x va 0x%lx type %d ref %lu pid %d\n",
             err, va, m->va_type, m->n->ref.load(), myproc()->pid);

   if (m->va_type == COW && (err & FEC_WR)) {
-    if (pagefault_wcow(vmap, va, pte, m, npg) < 0) {
+    if (pagefault_wcow(va, pte, m, npg) < 0) {
       release(&m->lock);
-      gc_end_epoch();
       return -1;
     }
   } else if (m->va_type == COW) {
@@ -239,41 +383,29 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
   }

   // XXX(sbw) Why reload hardware page tables?
-  lcr3(v2p(vmap->pml4)); // Reload hardware page tables
+  lcr3(v2p(pml4)); // Reload hardware page tables
   release(&m->lock);
-  gc_end_epoch();
   return 1;
 }

-vmnode::~vmnode()
+int
+pagefault(struct vmap *vmap, uptr va, u32 err)
 {
-  for(u64 i = 0; i < npages; i++) {
-    if (page[i]) {
-      kfree(page[i]);
-      page[i] = 0;
-    }
-  }
-  if (ip) {
-    iput(ip);
-    ip = 0;
-  }
+  return vmap->pagefault(va, err);
 }

 // Copy len bytes from p to user address va in vmap.
 // Most useful when vmap is not the current page table.
 int
-copyout(struct vmap *vmap, uptr va, void *p, u64 len)
+vmap::copyout(uptr va, void *p, u64 len)
 {
   char *buf = (char*)p;
   while(len > 0){
     uptr va0 = (uptr)PGROUNDDOWN(va);
-    gc_begin_epoch();
-    vma *vma = vmap->lookup(va, 1);
-    if(vma == 0) {
-      gc_end_epoch();
+    scoped_gc_epoch gc;
+    vma *vma = lookup(va, 1);
+    if(vma == 0)
       return -1;
-    }

     acquire(&vma->lock);
     uptr pn = (va0 - vma->va_start) / PGSIZE;
@@ -288,146 +420,6 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len)
     buf += n;
     va = va0 + PGSIZE;
     release(&vma->lock);
-    gc_end_epoch();
   }
   return 0;
 }
-
-struct vmnode *
-vmn_alloc(u64 npg, enum vmntype type)
-{
-  return new vmnode(npg, type);
-}
-
-vmnode::vmnode(u64 npg, vmntype ntype)
-  : npages(npg), ref(0), type(ntype), ip(0), offset(0), sz(0)
-{
-  if (npg > NELEM(page))
-    panic("vmnode too big\n");
-  memset(page, 0, sizeof(page));
-}
-
-vmap::~vmap()
-{
-  for (range *r: cr) {
-    delete (vma*) r->value;
-    cr.del(r->key, r->size);
-  }
-  if (kshared)
-    ksfree(slab_kshared, kshared);
-  if (pml4)
-    freevm(pml4);
-  alloc = 0;
-  destroylock(&lock);
-}
-
-// Does any vma overlap start..start+len?
-// If yes, return the vma pointer.
-// If no, return 0.
-// This code can't handle regions at the very end
-// of the address space, e.g. 0xffffffff..0x0
-// We key vma's by their end address.
-vma *
-vmap::lookup(uptr start, uptr len)
-{
-  if(start + len < start)
-    panic("vmap::lookup bad len");
-
-  range *r = cr.search(start, len);
-  if (r != 0) {
-    vma *e = (struct vma *) (r->value);
-    if (e->va_end <= e->va_start)
-      panic("malformed va");
-    if (e->va_start < start+len && e->va_end > start)
-      return e;
-  }
-  return 0;
-}
-
-int
-vmap::insert(vmnode *n, uptr va_start)
-{
-  scoped_acquire sa(&lock);
-  u64 len = n->npages * PGSIZE;
-
-  if (lookup(va_start, len)) {
-    cprintf("vmap_insert: overlap\n");
-    return -1;
-  }
-
-  vma *e = new vma();
-  if (e == 0)
-    return -1;
-  e->va_start = va_start;
-  e->va_end = va_start + len;
-  e->n = n;
-  n->ref++;
-  cr.add(e->va_start, len, (void *) e);
-  return 0;
-}
-
-vmap*
-vmap::copy(int share)
-{
-  struct vmap *nm = vmap_alloc();
-  if(nm == 0)
-    return 0;
-
-  scoped_acquire sa(&lock);
-  for (range *r: cr) {
-    struct vma *e = (struct vma *) r->value;
-    struct vma *ne = new vma();
-    if (ne == 0)
-      goto err;
-
-    ne->va_start = e->va_start;
-    ne->va_end = e->va_end;
-    if (share) {
-      ne->n = e->n;
-      ne->va_type = COW;
-
-      scoped_acquire sae(&e->lock);
-      e->va_type = COW;
-      updatepages(pml4, (void *) (e->va_start), (void *) (e->va_end), PTE_COW);
-    } else {
-      ne->n = vmn_copy(e->n);
-      ne->va_type = e->va_type;
-    }
-
-    if (ne->n == 0)
-      goto err;
-
-    ne->n->ref++;
-    nm->cr.add(ne->va_start, ne->va_end - ne->va_start, (void *) ne);
-  }
-
-  if (share)
-    lcr3(v2p(pml4));  // Reload hardware page table
-
-  return nm;
-
- err:
-  delete nm;
-  return 0;
-}
-
-int
-vmap::remove(uptr va_start, uptr len)
-{
-  scoped_acquire sa(&lock);
-  uptr va_end = va_start + len;
-  struct range *r = cr.search(va_start, len);
-  if (r == 0)
-    panic("no vma?");
-
-  struct vma *e = (struct vma *) r->value;
-  if(e->va_start != va_start || e->va_end != va_end) {
-    cprintf("vmap_remove: partial unmap unsupported\n");
-    return -1;
-  }
-
-  cr.del(va_start, len);
-  gc_delayed(e);
-  return 0;
-}
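The rewritten vm.cc keeps a one-line, C-callable pagefault(vmap, va, err) wrapper so existing trap-handling callers compile unchanged, while copy-on-write duplication is now driven through vmap::copy(share) and vmap::pagefault_wcow(). A rough sketch of the caller's view, assuming only the functions shown above; fork_vmap and handle_fault are illustrative names, not code from the tree:

// Illustrative only: exercising the reorganized vmap surface.
vmap *
fork_vmap(vmap *parent, int share)
{
  // share != 0: parent and child vmas both become COW and the parent's
  // pages are write-protected (vmap::copy applies PTE_COW); the first
  // write in either process then faults into vmap::pagefault_wcow(),
  // which clones the vmnode and remaps the page writable.
  return parent->copy(share);
}

int
handle_fault(proc *p, uptr va, u32 err)
{
  // The free function pagefault() now just forwards to the method, so
  // trap-handler callers need no change; >= 0 means the fault was handled.
  return pagefault(p->vmap, va, err);
}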
@@ -4,9 +4,32 @@

 using std::atomic;

+// A memory object (physical pages or inode).
+enum vmntype { EAGER, ONDEMAND };
+
+struct vmnode {
+  u64 npages;
+  char *page[128];
+  atomic<u64> ref;
+  enum vmntype type;
+  struct inode *ip;
+  u64 offset;
+  u64 sz;
+
+  vmnode(u64 npg, vmntype type = EAGER);
+  ~vmnode();
+  void decref();
+  int allocpg();
+  vmnode* copy();
+
+  int load(inode *ip, u64 offset, u64 sz);
+  int demand_load();
+};
+
 // A mapping of a chunk of an address space to
 // a specific memory object.
-enum vmatype { PRIVATE, COW};
+enum vmatype { PRIVATE, COW };

 struct vma : public rcu_freed {
   uptr va_start; // start of mapping
   uptr va_end;   // one past the last byte
@@ -25,25 +48,6 @@ struct vma : public rcu_freed {
   virtual void do_gc() { delete this; }
 };

-// A memory object (physical pages or inode).
-struct vmnode {
-  u64 npages;
-  char *page[128];
-  atomic<u64> ref;
-  enum vmntype type;
-  struct inode *ip;
-  u64 offset;
-  u64 sz;
-
-  vmnode(u64 npg, vmntype type);
-  ~vmnode();
-  void decref();
-  int allocpg();
-
-  int load(inode *ip, u64 offset, u64 sz);
-  int demand_load();
-};
-
 // An address space: a set of vmas plus h/w page table.
 // The elements of e[] are not ordered by address.
 struct vmap {
@@ -63,4 +67,11 @@ struct vmap {
   vma* lookup(uptr start, uptr len);
   int insert(vmnode *n, uptr va_start);
   int remove(uptr start, uptr len);
+
+  int pagefault(uptr va, u32 err);
+  int copyout(uptr va, void *p, u64 len);
+
+ private:
+  vma* pagefault_ondemand(uptr va, u32 err, vma *m);
+  int pagefault_wcow(uptr va, pme_t *pte, vma *m, u64 npg);
 };
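Taken together, the header above is the whole per-address-space surface this commit moves onto vmap and vmnode: construction via new, insert(), copyout(), pagefault() and decref(). A compact usage sketch mirroring the inituser() hunk earlier in the diff; setup_image and its arguments are illustrative names, not code from the tree:

// Illustrative sketch: build an address space the way inituser() now does.
vmap *
setup_image(const char *code, u64 size)
{
  vmap *vm = new vmap();
  if (vm == 0)
    return 0;

  // EAGER is the default vmnode type, so the pages are allocated up front.
  vmnode *vmn = new vmnode(PGROUNDUP(size) / PGSIZE);
  if (vmn == 0) {
    vm->decref();
    return 0;
  }
  if (vm->insert(vmn, 0) < 0) {
    delete vmn;              // not yet owned by vm
    vm->decref();
    return 0;
  }
  // copyout() is now a vmap method; it still works when vm is not the
  // current page table.
  if (vm->copyout(0, (void *) code, size) < 0) {
    vm->decref();            // the vmap destructor drops its vmas/vmnodes
    return 0;
  }
  return vm;
}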