Commit eabc1d9c authored by Nickolai Zeldovich

more methods

Parent da29ebfc
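
The diff below folds the free-standing vmn_* helpers in vm.cc into methods of struct vmnode: vmn_decref, vmn_doallocpg, and vmn_doload become vmnode::decref, vmnode::allocpg, and vmnode::demand_load; vmn_load becomes vmnode::load; and vmn_free becomes the destructor, so call sites switch from vmn_free(vmn) to delete vmn. A minimal sketch of the resulting call-site pattern, condensed from the hunks that follow:

    // before                          // after
    vmn_load(vmn, ip, off, sz);        vmn->load(ip, off, sz);
    vmn_decref(n);                     n->decref();
    vmn_free(vmn);                     delete vmn;
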
@@ -68,7 +68,7 @@ dosegment(uptr a0, u64 a1)
       goto bad;
     }
-    if(vmn_load(vmn, args->ip, ph.offset, ph.filesz) < 0)
+    if(vmn->load(args->ip, ph.offset, ph.filesz) < 0)
       goto bad;
     if(args->vmap->insert(vmn, ph.vaddr) < 0)

@@ -241,7 +241,7 @@ exec(char *path, char **argv)
   if(vmap)
     vmap->decref();
   if(vmn)
-    vmn_free(vmn);
+    delete vmn;
   gc_end_epoch();
   return 0;

@@ -255,11 +255,9 @@ struct vmap * vmap_alloc(void);
 struct vmnode* vmn_alloc(u64, enum vmntype);
 struct vmnode* vmn_allocpg(u64);
 int copyout(struct vmap *, uptr, void*, u64);
-void vmn_free(struct vmnode *);
 void switchuvm(struct proc*);
 void switchkvm(void);
 int pagefault(struct vmap *, uptr, u32);
-int vmn_load(struct vmnode *, struct inode*, u64, u64);
 void updatepages(pml4e_t*, void*, void*, int);

 // wq.c

@@ -474,7 +474,7 @@ growproc(int n)
   release(&m->lock); // XXX
   if(m->insert(vmn, newstart) < 0){
-    vmn_free(vmn);
+    delete vmn;
     cprintf("growproc: vmap_insert failed\n");
     return -1;
   }

@@ -115,7 +115,7 @@ sys_map(void)
     return -1;
   if (myproc()->vmap->insert(vmn, PGROUNDDOWN(addr)) < 0) {
-    vmn_free(vmn);
+    delete vmn;
     return -1;
   }

@@ -19,27 +19,27 @@ extern "C" {
 enum { vm_debug = 0 };

-static void
-vmn_decref(struct vmnode *n)
+void
+vmnode::decref()
 {
-  if(--n->ref == 0)
-    vmn_free(n);
+  if(--ref == 0)
+    delete this;
 }

 vma::~vma()
 {
   if(n)
-    vmn_decref(n);
+    n->decref();
   destroylock(&lock);
 }

-static int
-vmn_doallocpg(struct vmnode *n)
+int
+vmnode::allocpg()
 {
-  for(u64 i = 0; i < n->npages; i++) {
-    if((n->page[i] = kalloc()) == 0)
+  for(u64 i = 0; i < npages; i++) {
+    if((page[i] = kalloc()) == 0)
       return -1;
-    memset((char *) n->page[i], 0, PGSIZE);
+    memset((char *) page[i], 0, PGSIZE);
   }
   return 0;
 }

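vmnode::decref keeps the old semantics of vmn_decref but now ends in delete this, which is safe only because every vmnode is heap-allocated (see vmn_alloc below) and nothing touches the object after the call. A standalone sketch of the idiom; the incref side and the atomic counter are assumptions, since this hunk shows only a plain ref field:

    #include <atomic>

    struct refcounted {
      std::atomic<int> ref{1};   // creator holds the first reference
      void incref() { ++ref; }
      void decref() {
        if (--ref == 0)
          delete this;           // ok: heap-allocated, not used afterwards
      }
      virtual ~refcounted() = default;
    };
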
@@ -56,9 +56,9 @@ vmn_copy(struct vmnode *n)
     c->sz = c->sz;
   }
   if (n->page[0]) {   // If the first page is present, all of them are present
-    if (vmn_doallocpg(c) < 0) {
+    if (c->allocpg() < 0) {
       cprintf("vmn_copy: out of memory\n");
-      vmn_free(c);
+      delete c;
       return 0;
     }
     for(u64 i = 0; i < n->npages; i++) {

@@ -74,8 +74,8 @@ vmn_allocpg(u64 npg)
 {
   struct vmnode *n = vmn_alloc(npg, EAGER);
   if (n == 0) return 0;
-  if (vmn_doallocpg(n) < 0) {
-    vmn_free(n);
+  if (n->allocpg() < 0) {
+    delete n;
     return 0;
   }
   return n;

@@ -131,46 +131,42 @@ vmap_alloc()
   return new vmap();
 }

-static int
-vmn_doload(struct vmnode *vmn, struct inode *ip, u64 offset, u64 sz)
+int
+vmnode::demand_load()
 {
-  for(u64 i = 0; i < sz; i += PGSIZE){
-    char *p = vmn->page[i / PGSIZE];
+  for (u64 i = 0; i < sz; i += PGSIZE) {
+    char *p = page[i / PGSIZE];
     s64 n;
-    if(sz - i < PGSIZE)
+    if (sz - i < PGSIZE)
       n = sz - i;
     else
       n = PGSIZE;
-    if(readi(ip, p, offset+i, n) != n)
+    if (readi(ip, p, offset+i, n) != n)
       return -1;
   }
   return 0;
 }

-// Load a program segment into a vmnode.
 int
-vmn_load(struct vmnode *vmn, struct inode *ip, u64 offset, u64 sz)
+vmnode::load(inode *iparg, u64 offarg, u64 szarg)
 {
-  if (vmn->type == ONDEMAND) {
-    vmn->ip = ip;
-    vmn->offset = offset;
-    vmn->sz = sz;
+  ip = iparg;
+  offset = offarg;
+  sz = szarg;
+
+  if (type == ONDEMAND)
     return 0;
-  } else {
-    return vmn_doload(vmn, ip, offset, sz);
-  }
+  return demand_load();
 }

 static struct vma *
 pagefault_ondemand(struct vmap *vmap, uptr va, u32 err, struct vma *m)
 {
-  if (vmn_doallocpg(m->n) < 0) {
+  if (m->n->allocpg() < 0)
     panic("pagefault: couldn't allocate pages");
-  }
   release(&m->lock);
-  if (vmn_doload(m->n, m->n->ip, m->n->offset, m->n->sz) < 0) {
+  if (m->n->demand_load() < 0)
     panic("pagefault: couldn't load");
-  }
   m = vmap->lookup(va, 1);
   if (!m)
     panic("pagefault_ondemand");

@@ -198,7 +194,7 @@ pagefault_wcow(struct vmap *vmap, uptr va, pme_t *pte, struct vma *m, u64 npg)
   pte = walkpgdir(vmap->pml4, (const void *)va, 0);
   *pte = v2p(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
   // drop my ref to vmnode
-  vmn_decref(n);
+  n->decref();
   return 0;
 }

@@ -250,19 +246,18 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
   return 1;
 }

-void
-vmn_free(struct vmnode *n)
+vmnode::~vmnode()
 {
-  for(u64 i = 0; i < n->npages; i++) {
-    if (n->page[i]) {
-      kfree(n->page[i]);
-      n->page[i] = 0;
+  for(u64 i = 0; i < npages; i++) {
+    if (page[i]) {
+      kfree(page[i]);
+      page[i] = 0;
     }
   }
-  if (n->ip)
-    iput(n->ip);
-  n->ip = 0;
-  kmfree(n);
+  if (ip) {
+    iput(ip);
+    ip = 0;
+  }
 }

 // Copy len bytes from p to user address va in vmap.

@@ -301,18 +296,15 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len)
 struct vmnode *
 vmn_alloc(u64 npg, enum vmntype type)
 {
-  struct vmnode *n = (struct vmnode *) kmalloc(sizeof(struct vmnode));
-  if (n == 0) {
-    cprintf("out of vmnodes");
-    return 0;
-  }
-  if(npg > NELEM(n->page)) {
+  return new vmnode(npg, type);
+}
+
+vmnode::vmnode(u64 npg, vmntype ntype)
+  : npages(npg), ref(0), type(ntype), ip(0), offset(0), sz(0)
+{
+  if (npg > NELEM(page))
     panic("vmnode too big\n");
-  }
-  memset(n, 0, sizeof(struct vmnode));
-  n->npages = npg;
-  n->type = type;
-  return n;
+  memset(page, 0, sizeof(page));
 }

 vmap::~vmap()

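vmn_alloc is now just new vmnode(npg, type); the constructor's initializer list plus the memset of page[] replace the old memset of the whole struct, and allocation failure becomes the allocator's problem rather than a cprintf at this call site. For new/delete to work in a freestanding kernel, the global operator new presumably has to be routed to kmalloc somewhere else in the tree; a sketch of such an overload, offered as an assumption since this commit does not show it:

    // Hypothetical glue, assuming the kmalloc/kmfree used by the old code.
    void *operator new(unsigned long nbytes) {
      void *p = kmalloc(nbytes);
      if (p == 0)
        panic("operator new: out of memory");
      return p;
    }

    void operator delete(void *p) {
      kmfree(p);
    }
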
@@ -34,6 +34,14 @@ struct vmnode {
   struct inode *ip;
   u64 offset;
   u64 sz;
+
+  vmnode(u64 npg, vmntype type);
+  ~vmnode();
+
+  void decref();
+  int allocpg();
+  int load(inode *ip, u64 offset, u64 sz);
+  int demand_load();
 };

 // An address space: a set of vmas plus h/w page table.

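Taken together with the call sites earlier in the commit, the lifecycle of the new interface is: vmn_alloc constructs a node, load either reads the file region now (EAGER) or just records it (ONDEMAND), vmap->insert hands the node to an address space, and either decref or a plain delete on the error path reclaims it. A condensed sketch stitched from the dosegment and sys_map hunks; the surrounding declarations and the bad: label are assumptions:

    struct vmnode *vmn = vmn_alloc(npages, ONDEMAND);
    if (vmn == 0)
      goto bad;
    if (vmn->load(ip, off, sz) < 0)     // ONDEMAND: just caches ip/off/sz
      goto bad;
    if (vmap->insert(vmn, va) < 0) {
      delete vmn;                       // not yet shared, so delete directly
      goto bad;
    }
    // after insert the vma owns a reference; n->decref()
    // runs delete when the last mapping goes away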