vmap::tryinc tries to get a reference on an rcu_freed vmap

Parent 30028bd5
...@@ -7,7 +7,7 @@ struct padded_length { ...@@ -7,7 +7,7 @@ struct padded_length {
#if defined (XV6_KERNEL) #if defined (XV6_KERNEL)
struct uwq { struct uwq {
uwq(padded_length *len); uwq(vmap* vmap, padded_length *len);
~uwq(); ~uwq();
bool haswork(); bool haswork();
int trywork(); int trywork();
...@@ -19,6 +19,7 @@ private: ...@@ -19,6 +19,7 @@ private:
proc* getworker(); proc* getworker();
struct spinlock lock_; struct spinlock lock_;
vmap* vmap_;
padded_length* len_; padded_length* len_;
struct worker { struct worker {
......
...@@ -81,6 +81,8 @@ struct vmap : public rcu_freed { ...@@ -81,6 +81,8 @@ struct vmap : public rcu_freed {
bool replace_vma(vma *a, vma *b); bool replace_vma(vma *a, vma *b);
void decref(); void decref();
bool tryinc();
vmap* copy(int share); vmap* copy(int share);
vma* lookup(uptr start, uptr len); vma* lookup(uptr start, uptr len);
int insert(vmnode *n, uptr va_start, int dotlb); int insert(vmnode *n, uptr va_start, int dotlb);
......
...@@ -25,7 +25,7 @@ uwq_trywork(void) ...@@ -25,7 +25,7 @@ uwq_trywork(void)
continue; continue;
struct cpu *c = &cpus[j]; struct cpu *c = &cpus[j];
scoped_gc_epoch gcx(); scoped_gc_epoch xgc();
barrier(); barrier();
struct proc *p = c->proc; struct proc *p = c->proc;
if (p == nullptr || p->vmap == nullptr) if (p == nullptr || p->vmap == nullptr)
...@@ -45,8 +45,8 @@ uwq_trywork(void) ...@@ -45,8 +45,8 @@ uwq_trywork(void)
// //
// uwq // uwq
// //
uwq::uwq(padded_length *len) uwq::uwq(vmap* vmap, padded_length *len)
: len_(len) : vmap_(vmap), len_(len)
{ {
if (len_ != nullptr) { if (len_ != nullptr) {
for (int i = 0; i < NCPU; i++) for (int i = 0; i < NCPU; i++)
...@@ -63,6 +63,7 @@ uwq::~uwq(void) ...@@ -63,6 +63,7 @@ uwq::~uwq(void)
{ {
if (len_ != nullptr) if (len_ != nullptr)
ksfree(slab_userwq, len_); ksfree(slab_userwq, len_);
// XXX(sbw) clean up worker procs
} }
bool bool
...@@ -88,6 +89,12 @@ uwq::trywork(void) ...@@ -88,6 +89,12 @@ uwq::trywork(void)
if (p == nullptr) if (p == nullptr)
return -1; return -1;
if (!vmap_->tryinc())
return -1;
// XXX(sbw)
vmap_->decref();
panic("XXX");
return 0; return 0;
} }
...@@ -114,8 +121,12 @@ uwq::getworker(void) ...@@ -114,8 +121,12 @@ uwq::getworker(void)
} }
if (slot != -1) { if (slot != -1) {
proc* p = allocproc();
panic("XXX"); if (p != nullptr) {
worker_[slot].proc = p;
worker_[slot].running = 1;
return worker_[slot].proc;
}
} }
return nullptr; return nullptr;
......
...@@ -145,7 +145,8 @@ vmap::alloc(void) ...@@ -145,7 +145,8 @@ vmap::alloc(void)
return new vmap(); return new vmap();
} }
vmap::vmap() : rcu_freed("vm"), vmap::vmap() :
rcu_freed("vm"),
#if VM_CRANGE #if VM_CRANGE
cr(10), cr(10),
#endif #endif
...@@ -153,7 +154,7 @@ vmap::vmap() : rcu_freed("vm"), ...@@ -153,7 +154,7 @@ vmap::vmap() : rcu_freed("vm"),
rx(PGSHIFT), rx(PGSHIFT),
#endif #endif
ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared)), ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared)),
brk_(0), uwq_((padded_length*) ksalloc(slab_userwq)) brk_(0), uwq_(this, (padded_length*) ksalloc(slab_userwq))
{ {
initlock(&brklock_, "brk_lock", LOCKSTAT_VM); initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
if (pml4 == 0) { if (pml4 == 0) {
...@@ -202,6 +203,20 @@ vmap::decref() ...@@ -202,6 +203,20 @@ vmap::decref()
} }
bool bool
vmap::tryinc()
{
u64 o;
do {
o = ref.load();
if (o == 0)
return false;
} while (!cmpxch(&ref, o, o+1));
return true;
}
bool
vmap::replace_vma(vma *a, vma *b) vmap::replace_vma(vma *a, vma *b)
{ {
#if VM_CRANGE #if VM_CRANGE
......
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment.