Commit 28e654af, authored by Nickolai Zeldovich

combine epoch and epoch_depth.

get rid of gc_epoch_lock.
Parent eddb2693
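
The change collapses the per-process epoch counter, the nesting depth, and the spinlock guarding them into a single atomic 64-bit word: the low 8 bits hold the begin/end nesting depth and the upper 56 bits hold the epoch number. A minimal sketch of the encoding (the epoch_of/depth_of helper names are illustrative, not part of the commit):

    #include <atomic>
    #include <cstdint>

    typedef uint64_t u64;

    // One atomic word replaces the old epoch/epoch_depth pair and the
    // gc_epoch_lock spinlock that kept them consistent.
    std::atomic<u64> epoch;   // (epoch_number << 8) | nesting_depth

    static inline u64 epoch_of(u64 x) { return x >> 8; }    // upper 56 bits
    static inline u64 depth_of(u64 x) { return x & 0xff; }  // low 8 bits
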
@@ -57,9 +57,7 @@ struct proc : public rcu_freed {
   SLIST_HEAD(childlist, proc) childq;
   SLIST_ENTRY(proc) child_next;
   struct condvar cv;
-  std::atomic<u64> epoch;
-  struct spinlock gc_epoch_lock;
-  std::atomic<u64> epoch_depth;
+  std::atomic<u64> epoch;   // low 8 bits are depth count
   char lockname[16];
   int on_runq;
   int cpu_pin;
@@ -77,12 +75,11 @@ struct proc : public rcu_freed {
   proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
     state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
-    cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0), epoch_depth(0),
+    cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
     on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
   {
     snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
     initlock(&lock, lockname+3, LOCKSTAT_PROC);
-    initlock(&gc_epoch_lock, lockname+3, LOCKSTAT_PROC);
     initcondvar(&cv, lockname);
     memset(&childq, 0, sizeof(childq));
@@ -95,7 +92,6 @@ struct proc : public rcu_freed {
   ~proc() {
     destroylock(&lock);
-    destroylock(&gc_epoch_lock);
     destroycondvar(&cv);
   }
...
@@ -132,20 +132,22 @@ gc_delayfreelist(void)
   if (gc_debug) {
     cprintf("(%d,%d) (%s): min %lu global %lu\n", myproc()->cpuid, myproc()->pid, myproc()->name, min, global);
   }
-  myproc()->epoch_depth++;  // ensure enumerate's call to gc_begin_epoch doesn't have sideeffects
+  myproc()->epoch++;  // ensure enumerate's call to gc_begin_epoch doesn't have sideeffects
   xnspid->enumerate([&min](u32, proc *p)->bool{
       // Some threads may never call begin/end_epoch(), and never update
-      // p->epoch, so gc_thread does it for them. XXX get rid off lock?
-      acquire(&p->gc_epoch_lock);
-      if (p->epoch_depth == 0)
-        p->epoch = global_epoch.load();
-      release(&p->gc_epoch_lock);
+      // p->epoch, so gc_thread does it for them.
+      u64 x = p->epoch.load();
+      if (!(x & 0xff)) {
+        cmpxch(&p->epoch, x, global_epoch.load() << 8);
+        x = p->epoch.load();
+      }
       // cprintf("gc_min %d(%s): %lu %ld\n", p->pid, p->name, p->epoch, p->epoch_depth);
-      if (min > p->epoch)
-        min = p->epoch;
+      if (min > (x>>8))
+        min = (x>>8);
       return false;
     });
-  myproc()->epoch_depth--;
+  myproc()->epoch--;
   if (min >= global) {
     gc_move_to_tofree(min);
   }
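
In gc_delayfreelist, threads that never call gc_begin_epoch/gc_end_epoch would otherwise pin the minimum epoch, so the GC thread advances their epoch for them. With the packed word this needs no lock: the compare-and-swap succeeds only if the word is unchanged since the load, i.e. the owner has not entered a critical section in the meantime. A standalone sketch of that update, assuming sv6's cmpxch is a plain compare-and-swap wrapper:

    #include <atomic>
    #include <cstdint>

    typedef uint64_t u64;

    // Assumed semantics of sv6's cmpxch: a plain compare-and-swap.
    static bool cmpxch(std::atomic<u64> *a, u64 expected, u64 desired) {
      return a->compare_exchange_strong(expected, desired);
    }

    // Refresh a thread's epoch to the current global epoch, but only
    // while its depth byte is zero (it is outside any critical section).
    // If the CAS loses a race with the owner, the owner's value is newer.
    static void refresh_epoch(std::atomic<u64> *p_epoch, u64 global) {
      u64 x = p_epoch->load();
      if (!(x & 0xff))
        cmpxch(p_epoch, x, global << 8);
    }
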
@@ -158,7 +160,7 @@ gc_delayed(rcu_freed *e)
   int c = mycpu()->id;
   gc_state[c].ndelayed++;
-  u64 myepoch = myproc()->epoch;
+  u64 myepoch = (myproc()->epoch >> 8);
   u64 minepoch = gc_state[c].delayed[myepoch % NEPOCH].epoch;
   if (gc_debug)
     cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid,
@@ -176,24 +178,20 @@ void
 gc_begin_epoch(void)
 {
   if (myproc() == NULL) return;
-  acquire(&myproc()->gc_epoch_lock);
-  if (myproc()->epoch_depth++ > 0)
-    goto done;
-  myproc()->epoch = global_epoch.load();
+  u64 v = myproc()->epoch++;
+  if (v & 0xff)
+    return;
+  // not atomic, but it never goes backwards
+  cmpxch(&myproc()->epoch, v+1, (global_epoch.load()<<8)+1);
   // __sync_synchronize();
- done:
-  (void) 0;
-  release(&myproc()->gc_epoch_lock);
 }
 void
 gc_end_epoch(void)
 {
   if (myproc() == NULL) return;
-  acquire(&myproc()->gc_epoch_lock);
-  --myproc()->epoch_depth;
-  release(&myproc()->gc_epoch_lock);
-  if (myproc()->epoch_depth == 0 && gc_state[mycpu()->id].ndelayed > NGC)
+  u64 e = --myproc()->epoch;
+  if ((e & 0xff) == 0 && gc_state[mycpu()->id].ndelayed > NGC)
     cv_wakeup(&gc_state[mycpu()->id].cv);
 }
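
With the depth in the low byte, a plain atomic increment or decrement of the whole word implements nesting: only the outermost gc_begin_epoch (old depth zero) refreshes the epoch bits, and only the outermost gc_end_epoch sees the depth return to zero and considers waking the GC thread. A small trace of that behavior (the assertions just restate the invariant; they are not part of the commit):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    typedef uint64_t u64;

    int main() {
      std::atomic<u64> epoch{42 << 8};   // epoch 42, depth 0

      u64 v = epoch++;                   // outermost begin: depth 0 -> 1
      assert((v & 0xff) == 0);           // was outermost, would refresh epoch

      v = epoch++;                       // nested begin: depth 1 -> 2
      assert((v & 0xff) == 1);           // nested, epoch bits untouched

      u64 e = --epoch;                   // nested end: depth 2 -> 1
      assert((e & 0xff) == 1);

      e = --epoch;                       // outermost end: depth 1 -> 0
      assert((e & 0xff) == 0);           // now eligible for a GC wakeup check
      assert((e >> 8) == 42);            // epoch bits intact throughout
      return 0;
    }
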
@@ -221,7 +219,7 @@ gc_worker(void *x)
     release(&wl);
     gc_state[mycpu()->id].nrun++;
     u64 global = global_epoch;
-    myproc()->epoch = global_epoch.load();  // move the gc thread to next epoch
+    myproc()->epoch = global_epoch.load() << 8;  // move the gc thread to next epoch
     for (i = gc_state[mycpu()->id].min_epoch; i < global-2; i++) {
       int nfree = gc_free_tofreelist(&gc_state[mycpu()->id].tofree[i%NEPOCH].head, i);
       gc_state[mycpu()->id].tofree[i%NEPOCH].epoch += NEPOCH;
@@ -239,9 +237,7 @@ gc_worker(void *x)
 void
 initprocgc(struct proc *p)
 {
-  p->epoch = global_epoch.load();
-  p->epoch_depth = 0;
-  initlock(&p->gc_epoch_lock, "per process gc_lock", 0);
+  p->epoch = global_epoch.load() << 8;
 }
 void
...
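
Finally, both initprocgc and the gc_worker loop seed a thread's word with global_epoch.load() << 8, which leaves the depth byte zero, so the thread starts outside any critical section. One consequence of the packing, presumably acceptable given shallow kernel nesting: the 8-bit depth field caps begin_epoch nesting at 255 levels, since a 256th increment would carry into the epoch bits. A sketch of the initialization under those assumptions:

    #include <atomic>
    #include <cstdint>

    typedef uint64_t u64;

    std::atomic<u64> global_epoch{1};

    // Shifting the epoch left by 8 leaves the depth byte zero: a freshly
    // initialized thread is outside any critical section.
    static void init_thread_epoch(std::atomic<u64> *p_epoch) {
      p_epoch->store(global_epoch.load() << 8);
    }
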