提交 cbc50063 创建 作者: Nickolai Zeldovich

fix up growproc() to use crange correctly

no need for vmap::lock anymore either
上级 e6b2a3df
......@@ -135,7 +135,6 @@ struct crange {
range *const crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
static void mark(range *f, range *s);
static void freen(struct range *f, struct range *l);
static int lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr);
void print(int);
......@@ -193,6 +192,7 @@ struct crange_locked {
range_iterator begin() const { return range_iterator(prev_->next[0].ptr()); };
range_iterator end() const { return range_iterator(succ_); };
void replace(range *r);
void replace(range *prev, range *repl);
};
static inline range_iterator
......
......@@ -48,12 +48,10 @@ struct vma : public range {
// The elements of e[] are not ordered by address.
struct vmap {
struct crange cr;
struct spinlock lock; // serialize map/lookup/unmap
atomic<u64> ref;
u64 alloc;
pgmap *const pml4; // Page table
char *const kshared;
char lockname[16];
vmap();
~vmap();
......
......@@ -479,8 +479,11 @@ crange_locked::~crange_locked()
}
void
crange_locked::replace(range *repl)
crange_locked::replace(range *prev, range *repl)
{
if (!prev)
prev = prev_;
range *newlast = 0;
for (range *e = repl; e; e = e->next[0].ptr()) {
assert(e->key >= base_ && e->key + e->size <= base_ + size_);
......@@ -493,8 +496,8 @@ crange_locked::replace(range *repl)
// do compare-exchange first, and only then mark the old ranges as deleted;
// otherwise, concurrent readers may not find either old or new ranges.
range *replaced = prev_->next[0].ptr();
prev_->next[0] = repl ?: succ_;
range *replaced = prev->next[0].ptr();
prev->next[0] = repl ?: succ_;
crange::mark(replaced, succ_);
for (range *e = replaced; e && e != succ_; e = e->next[0].ptr()) {
......@@ -502,3 +505,9 @@ crange_locked::replace(range *repl)
e->dec_ref();
}
}
// Replace the ranges covered by this locked span with the new list `repl`.
// Convenience overload: delegates to the two-argument replace(prev, repl),
// which treats a null `prev` as the span's own predecessor (prev_), so the
// new list is spliced in at the front of the span.  Passing nullptr (not the
// C-style literal 0) makes the pointer intent explicit.
void
crange_locked::replace(range *repl)
{
  replace(nullptr, repl);
}
......@@ -408,74 +408,58 @@ int
growproc(int n)
{
struct vmap *m = myproc()->vmap;
auto curbrk = myproc()->brk;
if(n < 0 && 0 - n <= myproc()->brk){
if(n < 0 && 0 - n <= curbrk){
myproc()->brk += n;
return 0;
}
if(n < 0 || n > USERTOP || myproc()->brk + n > USERTOP)
if(n < 0 || n > USERTOP || curbrk + n > USERTOP)
return -1;
acquire(&m->lock);
// find first unallocated address in brk..brk+n
uptr newstart = myproc()->brk;
u64 newn = n;
gc_begin_epoch();
while(newn > 0){
vma *e = m->lookup(newstart, 1);
if(e == 0)
break;
if(e->vma_end >= newstart + newn){
newstart += newn;
newn = 0;
break;
}
newn -= e->vma_end - newstart;
newstart = e->vma_end;
}
gc_end_epoch();
if(newn <= 0){
// no need to allocate
myproc()->brk += n;
release(&m->lock);
switchuvm(myproc());
return 0;
}
// is there space for newstart..newstart+newn?
if(m->lookup(newstart, newn) != 0){
cprintf("growproc: not enough room in address space; brk %lx n %d\n",
myproc()->brk, n);
return -1;
}
// look one page ahead, to check if the newly allocated region would
// abut the next-higher vma? we can't allow that, since then a future
// sbrk() would start to use the next region (e.g. the stack).
uptr newstart = PGROUNDUP(curbrk);
s64 newn = PGROUNDUP(n + curbrk - newstart);
range *prev = 0;
auto span = m->cr.search_lock(newstart, newstart + newn + PGSIZE);
for (range *r: span) {
vma *e = (vma*) r;
if (e->vma_start <= newstart) {
if (e->vma_end >= newstart + newn) {
myproc()->brk += n;
switchuvm(myproc());
return 0;
}
// would the newly allocated region abut the next-higher
// vma? we can't allow that, since then a future sbrk()
// would start to use the next region (e.g. the stack).
if(m->lookup(PGROUNDUP(newstart+newn), 1) != 0){
cprintf("growproc: would abut next vma; brk %lx n %d\n",
myproc()->brk, n);
return -1;
newn -= e->vma_end - newstart;
newstart = e->vma_end;
prev = e;
} else {
cprintf("growproc: overlap with existing mapping; brk %lx n %d\n",
curbrk, n);
return -1;
}
}
vmnode *vmn = new vmnode(PGROUNDUP(newn) / PGSIZE);
vmnode *vmn = new vmnode(newn / PGSIZE);
if(vmn == 0){
release(&m->lock);
cprintf("growproc: vmn_allocpg failed\n");
return -1;
}
release(&m->lock); // XXX
if(m->insert(vmn, newstart) < 0){
vma *repl = new vma(m, newstart, newstart+newn, PRIVATE, vmn);
if (!repl) {
cprintf("growproc: out of vma\n");
delete vmn;
cprintf("growproc: vmap_insert failed\n");
return -1;
}
span.replace(prev, repl);
myproc()->brk += n;
switchuvm(myproc());
return 0;
......
......@@ -140,9 +140,6 @@ vma::~vma()
vmap::vmap()
: cr(10), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared))
{
snprintf(lockname, sizeof(lockname), "vmap:%p", this);
initlock(&lock, lockname, LOCKSTAT_VM);
ref = 1;
alloc = 0;
......@@ -168,7 +165,6 @@ vmap::vmap()
ksfree(slab_kshared, kshared);
if (pml4)
freevm(pml4);
destroylock(&lock);
}
vmap::~vmap()
......@@ -178,7 +174,6 @@ vmap::~vmap()
if (pml4)
freevm(pml4);
alloc = 0;
destroylock(&lock);
}
void
......@@ -207,9 +202,6 @@ vmap::copy(int share)
if(nm == 0)
return 0;
scoped_acquire sa(&lock);
// XXX how to construct a consistent copy?
for (range *r: cr) {
vma *e = (vma *) r;
......
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论