Commit cbc50063 authored by Nickolai Zeldovich

fix up growproc() to use crange correctly

no need for vmap::lock anymore either

Parent e6b2a3df
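The heart of the change: instead of serializing every map/lookup/unmap through vmap::lock, callers lock only the address span they are about to modify via crange::search_lock(), which returns a crange_locked guard. A minimal sketch of that pattern, mirroring the calls in the diff below (the exact search_lock() argument convention and the guard's unlock-on-destruction are assumptions read off how growproc() uses it):

// Hedged sketch, not verbatim kernel code: update one span of a vmap
// without taking any vmap-wide lock.
static void
update_span(vmap *m, uptr start, u64 len, vma *repl)
{
  // Lock just [start, start+len), following the diff's call shape.
  auto span = m->cr.search_lock(start, start + len);

  range *prev = 0;
  for (range *r: span) {
    // The ranges stored in a vmap's crange are vmas.
    prev = r;                  // remember the last existing range
  }

  span.replace(prev, repl);    // atomically splice the new vma in after it
}                              // guard goes out of scope: span unlocked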
@@ -135,7 +135,6 @@ struct crange {
   range *const crange_head;   // a crange skip list starts with a sentinel range (key 0, sz 0)
   static void mark(range *f, range *s);
-  static void freen(struct range *f, struct range *l);
   static int lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr);
   void print(int);
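As the sentinel comment above says, a crange is a skip list whose level-0 list begins with a dummy range of key 0 and size 0. Conceptually (addresses invented for illustration; higher skip-list levels omitted):

  crange_head(key=0, sz=0) -> [0x1000, 0x1000+0x3000) -> [0x6000, 0x6000+0x2000) -> (end)

Presumably lock_range(k, sz, ...) walks this structure to find and lock the ranges overlapping [k, k+sz); the higher levels only accelerate the search.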
@@ -193,6 +192,7 @@ struct crange_locked {
   range_iterator begin() const { return range_iterator(prev_->next[0].ptr()); };
   range_iterator end() const { return range_iterator(succ_); };
   void replace(range *r);
+  void replace(range *prev, range *repl);
 };
 
 static inline range_iterator
...
@@ -48,12 +48,10 @@ struct vma : public range {
 // The elements of e[] are not ordered by address.
 struct vmap {
   struct crange cr;
-  struct spinlock lock; // serialize map/lookup/unmap
   atomic<u64> ref;
   u64 alloc;
   pgmap *const pml4;    // Page table
   char *const kshared;
-  char lockname[16];
   vmap();
   ~vmap();
...
@@ -479,8 +479,11 @@ crange_locked::~crange_locked()
 }
 
 void
-crange_locked::replace(range *repl)
+crange_locked::replace(range *prev, range *repl)
 {
+  if (!prev)
+    prev = prev_;
+
   range *newlast = 0;
   for (range *e = repl; e; e = e->next[0].ptr()) {
     assert(e->key >= base_ && e->key + e->size <= base_ + size_);
@@ -493,8 +496,8 @@ crange_locked::replace(range *repl)
 
   // do compare-exchange first, and only then mark the old ranges as deleted;
   // otherwise, concurrent readers may not find either old or new ranges.
-  range *replaced = prev_->next[0].ptr();
-  prev_->next[0] = repl ?: succ_;
+  range *replaced = prev->next[0].ptr();
+  prev->next[0] = repl ?: succ_;
   crange::mark(replaced, succ_);
 
   for (range *e = replaced; e && e != succ_; e = e->next[0].ptr()) {
@@ -502,3 +505,9 @@ crange_locked::replace(range *repl)
     e->dec_ref();
   }
 }
+
+void
+crange_locked::replace(range *repl)
+{
+  replace(0, repl);
+}
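The two-argument overload added above generalizes the splice point: repl is linked in after prev, and everything from prev->next up to the span's successor is marked deleted; a null prev falls back to prev_, the range just before the locked span, which is exactly the old one-argument behavior. A hedged usage sketch (names from the diff, semantics read off the code above):

// Inside a span obtained from search_lock():
span.replace(r);        // == span.replace(0, r): everything in the span
                        // is replaced by the list starting at r
span.replace(last, r);  // ranges up to and including 'last' survive;
                        // only the tail after 'last' is replaced by r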
@@ -408,74 +408,58 @@ int
 growproc(int n)
 {
   struct vmap *m = myproc()->vmap;
+  auto curbrk = myproc()->brk;
 
-  if(n < 0 && 0 - n <= myproc()->brk){
+  if(n < 0 && 0 - n <= curbrk){
     myproc()->brk += n;
     return 0;
   }
 
-  if(n < 0 || n > USERTOP || myproc()->brk + n > USERTOP)
+  if(n < 0 || n > USERTOP || curbrk + n > USERTOP)
     return -1;
 
-  acquire(&m->lock);
-
-  // find first unallocated address in brk..brk+n
-  uptr newstart = myproc()->brk;
-  u64 newn = n;
-  gc_begin_epoch();
-  while(newn > 0){
-    vma *e = m->lookup(newstart, 1);
-    if(e == 0)
-      break;
-    if(e->vma_end >= newstart + newn){
-      newstart += newn;
-      newn = 0;
-      break;
-    }
-    newn -= e->vma_end - newstart;
-    newstart = e->vma_end;
-  }
-  gc_end_epoch();
-
-  if(newn <= 0){
-    // no need to allocate
-    myproc()->brk += n;
-    release(&m->lock);
-    switchuvm(myproc());
-    return 0;
-  }
-
-  // is there space for newstart..newstart+newn?
-  if(m->lookup(newstart, newn) != 0){
-    cprintf("growproc: not enough room in address space; brk %lx n %d\n",
-            myproc()->brk, n);
-    return -1;
-  }
-
-  // would the newly allocated region abut the next-higher
-  // vma? we can't allow that, since then a future sbrk()
-  // would start to use the next region (e.g. the stack).
-  if(m->lookup(PGROUNDUP(newstart+newn), 1) != 0){
-    cprintf("growproc: would abut next vma; brk %lx n %d\n",
-            myproc()->brk, n);
-    return -1;
+  // look one page ahead, to check if the newly allocated region would
+  // abut the next-higher vma? we can't allow that, since then a future
+  // sbrk() would start to use the next region (e.g. the stack).
+  uptr newstart = PGROUNDUP(curbrk);
+  s64 newn = PGROUNDUP(n + curbrk - newstart);
+
+  range *prev = 0;
+  auto span = m->cr.search_lock(newstart, newstart + newn + PGSIZE);
+  for (range *r: span) {
+    vma *e = (vma*) r;
+
+    if (e->vma_start <= newstart) {
+      if (e->vma_end >= newstart + newn) {
+        myproc()->brk += n;
+        switchuvm(myproc());
+        return 0;
+      }
+
+      newn -= e->vma_end - newstart;
+      newstart = e->vma_end;
+      prev = e;
+    } else {
+      cprintf("growproc: overlap with existing mapping; brk %lx n %d\n",
+              curbrk, n);
+      return -1;
+    }
   }
 
-  vmnode *vmn = new vmnode(PGROUNDUP(newn) / PGSIZE);
+  vmnode *vmn = new vmnode(newn / PGSIZE);
   if(vmn == 0){
-    release(&m->lock);
     cprintf("growproc: vmn_allocpg failed\n");
     return -1;
   }
-  release(&m->lock); // XXX
 
-  if(m->insert(vmn, newstart) < 0){
+  vma *repl = new vma(m, newstart, newstart+newn, PRIVATE, vmn);
+  if (!repl) {
+    cprintf("growproc: out of vma\n");
     delete vmn;
-    cprintf("growproc: vmap_insert failed\n");
     return -1;
   }
 
+  span.replace(prev, repl);
+
   myproc()->brk += n;
   switchuvm(myproc());
   return 0;
...
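The page arithmetic at the top of the rewritten growproc() is worth one concrete case (values invented for illustration, assuming PGSIZE is 0x1000 and PGROUNDUP rounds up to the next page boundary); the extra PGSIZE passed to search_lock() is the one-page lookahead that lets the loop detect an abutting next-higher vma:

// curbrk = 0x5123, n = 0x3000: grow the heap by three pages.
uptr newstart = PGROUNDUP(0x5123);           // = 0x6000, first boundary at or above brk
s64  newn = PGROUNDUP(0x3000 + 0x5123 - 0x6000);
//        = PGROUNDUP(0x2123) = 0x3000       // whole pages needed past newstart
// New brk = 0x5123 + 0x3000 = 0x8123, which lies inside
// [newstart, newstart + newn) = [0x6000, 0x9000), so the mapping
// found or created by the loop covers the grown heap.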
@@ -140,9 +140,6 @@ vma::~vma()
 vmap::vmap()
   : cr(10), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared))
 {
-  snprintf(lockname, sizeof(lockname), "vmap:%p", this);
-  initlock(&lock, lockname, LOCKSTAT_VM);
-
   ref = 1;
   alloc = 0;
@@ -168,7 +165,6 @@ vmap::vmap()
     ksfree(slab_kshared, kshared);
   if (pml4)
     freevm(pml4);
-  destroylock(&lock);
 }
 
 vmap::~vmap()
@@ -178,7 +174,6 @@ vmap::~vmap()
   if (pml4)
     freevm(pml4);
   alloc = 0;
-  destroylock(&lock);
 }
 
 void
@@ -207,9 +202,6 @@ vmap::copy(int share)
   if(nm == 0)
     return 0;
 
-  scoped_acquire sa(&lock);
-
-  // XXX how to construct a consistent copy?
   for (range *r: cr) {
     vma *e = (vma *) r;
...
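With the lock gone, copy() iterates the crange directly. Presumably the read side is protected by epoch-based reclamation rather than mutual exclusion: the old growproc() bracketed its lookups with gc_begin_epoch()/gc_end_epoch(), and the same discipline would make a lock-free traversal safe. A sketch under that assumption ('use' is a placeholder, not a real helper):

// Hedged sketch: lock-free read-side traversal of a vmap's crange.
gc_begin_epoch();                 // deleted ranges are not reclaimed mid-epoch
for (range *r: m->cr) {
  vma *e = (vma *) r;
  use(e->vma_start, e->vma_end);  // hypothetical consumer of each mapping
}
gc_end_epoch();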