Commit 310f8cb8 authored by Nickolai Zeldovich

vma inherits from range

Parent 7dd0fbb3
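The diff below removes crange's generic payload pointer (rcu_freed *value) and instead makes vma derive from range, so a skiplist node and the mapping it describes become a single object: lookups downcast the node rather than dereferencing value, and one allocation per mapping disappears. A minimal sketch of that intrusive-container pattern, using hypothetical names rather than the xv6-scale types:

// Minimal sketch of the refactoring (hypothetical names, not the
// xv6-scale sources): a container of node->payload pairs becomes an
// intrusive container whose elements *are* the nodes.
#include <cassert>
#include <cstdint>

// Before: the node owned an opaque payload in a separate allocation.
struct node_old {
  uint64_t key, size;
  void *value;                 // payload chased through a pointer
};

// After: the payload derives from the node, so key/size/links live
// in the payload object itself.
struct node {
  uint64_t key, size;
  node(uint64_t k, uint64_t sz) : key(k), size(sz) {}
  virtual ~node() {}
};

struct mapping : node {        // analogous to "struct vma : public range"
  uintptr_t start, end;
  mapping(uintptr_t s, uintptr_t e) : node(s, e - s), start(s), end(e) {}
};

int main() {
  node *r = new mapping(0x1000, 0x3000);
  mapping *m = (mapping *) r;  // what "vma *e = (vma *) r" does below
  assert(m->start == 0x1000 && r->size == 0x2000);
  delete r;                    // virtual dtor reclaims the one object
  return 0;
}

The trade-off is that the element's lifetime is now tied to the node's destructor and do_gc path, which is why ~range moves to protected and the old "if (value) value->do_gc();" in ~range disappears below.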
@@ -88,11 +88,6 @@ struct range : public rcu_freed {
  private:
   u64 key;
   u64 size;
- public:
-  rcu_freed *value;
- private:
   atomic<int> curlevel;   // the current levels it appears on
   int nlevel;             // the number of levels this range should appear
   crange *cr;             // the crange this range is part of
@@ -102,14 +97,16 @@ struct range : public rcu_freed {
   void print(int l);
   void dec_ref(void);
   int lockif(markptr<range> e);
-  ~range();

   friend class crange;
   friend class crange_locked;
   friend class range_iterator;

+ protected:
+  ~range();
+
  public:
-  range(crange *cr, u64 k, u64 sz, rcu_freed *v, range *n, int nlevel = 0);
+  range(crange *cr, u64 k, u64 sz, int nlevel = 0);
   virtual void do_gc() {
     delete this;
@@ -181,10 +178,9 @@ struct crange_locked {
   scoped_gc_epoch gc;

   crange_locked(crange *cr, u64 base, u64 size, range *p, range *f, range *l, range *s);
-  friend class crange;
   crange_locked(const crange_locked&) = delete;
   crange_locked& operator=(const crange_locked&) = delete;
+  friend class crange;

  public:
   crange_locked(crange_locked &&x);
...
@@ -32,7 +32,7 @@ struct vmnode {
 // a specific memory object.
 enum vmatype { PRIVATE, COW };

-struct vma : public rcu_freed {
+struct vma : public range {
   uptr vma_start;   // start of mapping
   uptr vma_end;     // one past the last byte
   enum vmatype va_type;
@@ -40,7 +40,7 @@ struct vma : public rcu_freed {
   struct spinlock lock;   // serialize fault/unmap
   char lockname[16];

-  vma();
+  vma(vmap *vmap, uptr start, uptr end);
   ~vma();

   virtual void do_gc() { delete this; }
...
@@ -86,7 +86,7 @@ void
 range::print(int l)
 {
   cprintf ("0x%lx-0x%lx(%lu) 0x%lx, c %d, t %d, n 0x%lx m %d\n",
-           key, key+size, size, (long) value, curlevel.load(), nlevel,
+           key, key+size, size, (long) this, curlevel.load(), nlevel,
            (long) next, (bool) next[l].mark());
 }
@@ -100,8 +100,6 @@ range::~range()
   }
   kmalignfree(lock);
   kmfree(next);
-  if (value)
-    value->do_gc();
 }

 void
@@ -116,13 +114,12 @@ range::dec_ref(void)
   }
 }

-range::range(crange *crarg, u64 k, u64 sz, rcu_freed *v, range *n, int nl)
+range::range(crange *crarg, u64 k, u64 sz, int nl)
   : rcu_freed("range_delayed")
 {
   dprintf("range::range: %lu %lu %d\n", k, sz, nl);
   key = k;
   size = sz;
-  value = v;
   cr = crarg;
   assert(cr->nlevel > 0);
   curlevel = 0;
@@ -130,8 +127,7 @@ range::range(crange *crarg, u64 k, u64 sz, rcu_freed *v, range *n, int nl)
   else nlevel = nl;
   next = new markptr<range>[nlevel];   // cache align?
   assert(next);
-  next[0] = n;
-  for (int l = 1; l < nlevel; l++) next[l] = 0;
+  for (int l = 0; l < nlevel; l++) next[l] = 0;
   assert(kmalign((void **) &lock, CACHELINE,
                  sizeof(struct spinlock)) == 0);
   initlock(lock, "crange", LOCKSTAT_CRANGE);
@@ -210,7 +206,7 @@ crange::crange(int nl)
 {
   assert(nl > 0);
   nlevel = nl;
-  crange_head = new range(this, 0, 0, nullptr, nullptr, nlevel);
+  crange_head = new range(this, 0, 0, nlevel);
   dprintf("crange::crange return 0x%lx\n", (u64) this);
 }
...
@@ -119,8 +119,9 @@ vmnode::load(inode *iparg, u64 offarg, u64 szarg)
  * vma
  */

-vma::vma()
-  : rcu_freed("vma"), vma_start(0), vma_end(0), va_type(PRIVATE), n(0)
+vma::vma(vmap *vmap, uptr start, uptr end)
+  : range(&vmap->cr, start, end-start),
+    vma_start(start), vma_end(end), va_type(PRIVATE), n(0)
 {
   snprintf(lockname, sizeof(lockname), "vma:%p", this);
   initlock(&lock, lockname, LOCKSTAT_VM);
@@ -199,16 +200,16 @@ vmap::copy(int share)
     return 0;

   scoped_acquire sa(&lock);
+  // XXX how to construct a consistent copy?
   for (range *r: cr) {
-    struct vma *ne = new vma();
+    vma *e = (vma *) r;
+    scoped_acquire sae(&e->lock);
+    struct vma *ne = new vma(nm, e->vma_start, e->vma_end);
     if (ne == 0)
       goto err;
-    struct vma *e = (struct vma *) r->value;
-    scoped_acquire sae(&e->lock);
-    ne->vma_start = e->vma_start;
-    ne->vma_end = e->vma_end;

     if (share) {
       ne->n = e->n;
       ne->va_type = COW;
@@ -233,7 +234,7 @@ vmap::copy(int share)
     auto span = nm->cr.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
     for (auto x __attribute__((unused)): span)
       assert(0);  /* span must be empty */
-    span.replace(new range(&nm->cr, ne->vma_start, ne->vma_end - ne->vma_start, ne, 0));
+    span.replace(ne);
   }

   if (share) {
@@ -262,7 +263,7 @@ vmap::lookup(uptr start, uptr len)
   range *r = cr.search(start, len);
   if (r != 0) {
-    vma *e = (struct vma *) (r->value);
+    vma *e = (vma *) r;
     if (e->vma_end <= e->vma_start)
       panic("malformed va");
     if (e->vma_start < start+len && e->vma_end > start)
@@ -282,24 +283,22 @@ vmap::insert(vmnode *n, uptr vma_start)
     u64 len = n->npages * PGSIZE;
     auto span = cr.search_lock(vma_start, len);
     for (auto r: span) {
-      vma *rvma = (vma*) r->value;
+      vma *rvma = (vma*) r;
       cprintf("vmap::insert: overlap with 0x%lx--0x%lx\n", rvma->vma_start, rvma->vma_end);
       return -1;
     }

     // XXX handle overlaps
-    e = new vma();
+    e = new vma(this, vma_start, vma_start+len);
     if (e == 0) {
       cprintf("vmap::insert: out of vmas\n");
       return -1;
     }
-    e->vma_start = vma_start;
-    e->vma_end = vma_start + len;
     e->n = n;
     n->ref++;
-    span.replace(new range(&cr, vma_start, len, e, 0));
+    span.replace(e);
   }

   updatepages(pml4, e->vma_start, e->vma_end, [](atomic<pme_t> *p) {
@@ -324,7 +323,7 @@ vmap::remove(uptr vma_start, uptr len)
   auto span = cr.search_lock(vma_start, len);
   for (auto r: span) {
-    vma *rvma = (vma*) r->value;
+    vma *rvma = (vma*) r;
     if (rvma->vma_start < vma_start || rvma->vma_end > vma_end) {
       cprintf("vmap::remove: partial unmap not supported\n");
       return -1;
...