提交 0b085451 创建 作者: Frans Kaashoek's avatar Frans Kaashoek

Switch to gc based on Fraser's reclamation scheme

rcu -> gc
上级 266d673d
...@@ -49,7 +49,7 @@ OBJS = \ ...@@ -49,7 +49,7 @@ OBJS = \
pipe.o \ pipe.o \
proc.o \ proc.o \
prof.o \ prof.o \
rcu.o \ gc.o \
sampler.o \ sampler.o \
sched.o \ sched.o \
spinlock.o \ spinlock.o \
......
...@@ -64,7 +64,7 @@ bget(u32 dev, u64 sector, int *writer) ...@@ -64,7 +64,7 @@ bget(u32 dev, u64 sector, int *writer)
loop: loop:
// Try for cached block. // Try for cached block.
// XXX ignore dev // XXX ignore dev
rcu_begin_read(); gc_begin_epoch();
b = ns_lookup(bufns, KII(dev, sector)); b = ns_lookup(bufns, KII(dev, sector));
if (b) { if (b) {
if (b->dev != dev || b->sector != sector) if (b->dev != dev || b->sector != sector)
...@@ -74,7 +74,7 @@ bget(u32 dev, u64 sector, int *writer) ...@@ -74,7 +74,7 @@ bget(u32 dev, u64 sector, int *writer)
if (b->flags & B_BUSY) { if (b->flags & B_BUSY) {
cv_sleep(&b->cv, &b->lock); cv_sleep(&b->cv, &b->lock);
release(&b->lock); release(&b->lock);
rcu_end_read(); gc_end_epoch();
goto loop; goto loop;
} }
...@@ -86,7 +86,7 @@ bget(u32 dev, u64 sector, int *writer) ...@@ -86,7 +86,7 @@ bget(u32 dev, u64 sector, int *writer)
// rcu_end_read() happens in brelse // rcu_end_read() happens in brelse
return b; return b;
} }
rcu_end_read(); gc_end_epoch();
// Allocate fresh block. // Allocate fresh block.
struct buf *victim = ns_enumerate(bufns, evict, 0); struct buf *victim = ns_enumerate(bufns, evict, 0);
...@@ -97,7 +97,7 @@ bget(u32 dev, u64 sector, int *writer) ...@@ -97,7 +97,7 @@ bget(u32 dev, u64 sector, int *writer)
victim->flags |= B_BUSY; victim->flags |= B_BUSY;
ns_remove(bufns, KII(victim->dev, victim->sector), victim); ns_remove(bufns, KII(victim->dev, victim->sector), victim);
release(&victim->lock); release(&victim->lock);
rcu_delayed(victim, kmfree); gc_delayed(victim, kmfree);
b = kmalloc(sizeof(*b)); b = kmalloc(sizeof(*b));
b->dev = dev; b->dev = dev;
...@@ -107,9 +107,9 @@ bget(u32 dev, u64 sector, int *writer) ...@@ -107,9 +107,9 @@ bget(u32 dev, u64 sector, int *writer)
snprintf(b->lockname, sizeof(b->lockname), "cv:buf:%d", b->sector); snprintf(b->lockname, sizeof(b->lockname), "cv:buf:%d", b->sector);
initlock(&b->lock, b->lockname+3); initlock(&b->lock, b->lockname+3);
initcondvar(&b->cv, b->lockname); initcondvar(&b->cv, b->lockname);
rcu_begin_read(); gc_begin_epoch();
if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0) { if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0) {
rcu_delayed(b, kmfree); gc_delayed(b, kmfree);
goto loop; goto loop;
} }
// rcu_end_read() happens in brelse // rcu_end_read() happens in brelse
...@@ -155,7 +155,7 @@ brelse(struct buf *b, int writer) ...@@ -155,7 +155,7 @@ brelse(struct buf *b, int writer)
cv_wakeup(&b->cv); cv_wakeup(&b->cv);
} }
// rcu_begin_read() happens in bread // rcu_begin_read() happens in bread
rcu_end_read(); gc_end_epoch();
} }
void void
......
...@@ -48,7 +48,7 @@ free_node(void *p) ...@@ -48,7 +48,7 @@ free_node(void *p)
static void static void
rcu_free_node(node_t *n) rcu_free_node(node_t *n)
{ {
rcu_delayed(n, free_node); gc_delayed(n, free_node);
} }
static void static void
......
...@@ -169,7 +169,7 @@ exec(char *path, char **argv) ...@@ -169,7 +169,7 @@ exec(char *path, char **argv)
if((ip = namei(path)) == 0) if((ip = namei(path)) == 0)
return -1; return -1;
rcu_begin_read(); gc_begin_epoch();
// Check ELF header // Check ELF header
if(ip->type != T_FILE) if(ip->type != T_FILE)
...@@ -228,7 +228,7 @@ exec(char *path, char **argv) ...@@ -228,7 +228,7 @@ exec(char *path, char **argv)
switchuvm(myproc()); switchuvm(myproc());
vmap_decref(oldvmap); vmap_decref(oldvmap);
rcu_end_read(); gc_end_epoch();
prof_end(exec_prof); prof_end(exec_prof);
return 0; return 0;
...@@ -238,7 +238,7 @@ exec(char *path, char **argv) ...@@ -238,7 +238,7 @@ exec(char *path, char **argv)
vmap_decref(vmap); vmap_decref(vmap);
if(vmn) if(vmn)
vmn_free(vmn); vmn_free(vmn);
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
...@@ -244,17 +244,17 @@ iget(u32 dev, u32 inum) ...@@ -244,17 +244,17 @@ iget(u32 dev, u32 inum)
retry: retry:
// Try for cached inode. // Try for cached inode.
rcu_begin_read(); gc_begin_epoch();
ip = ns_lookup(ins, KII(dev, inum)); ip = ns_lookup(ins, KII(dev, inum));
if (ip) { if (ip) {
// tricky: first bump ref, then check free flag // tricky: first bump ref, then check free flag
__sync_fetch_and_add(&ip->ref, 1); __sync_fetch_and_add(&ip->ref, 1);
if (ip->flags & I_FREE) { if (ip->flags & I_FREE) {
rcu_end_read(); gc_end_epoch();
__sync_sub_and_fetch(&ip->ref, 1); __sync_sub_and_fetch(&ip->ref, 1);
goto retry; goto retry;
} }
rcu_end_read(); gc_end_epoch();
if (!(ip->flags & I_VALID)) { if (!(ip->flags & I_VALID)) {
acquire(&ip->lock); acquire(&ip->lock);
while((ip->flags & I_VALID) == 0) while((ip->flags & I_VALID) == 0)
...@@ -263,7 +263,7 @@ iget(u32 dev, u32 inum) ...@@ -263,7 +263,7 @@ iget(u32 dev, u32 inum)
} }
return ip; return ip;
} }
rcu_end_read(); gc_end_epoch();
// Allocate fresh inode cache slot. // Allocate fresh inode cache slot.
retry_evict: retry_evict:
...@@ -282,7 +282,7 @@ iget(u32 dev, u32 inum) ...@@ -282,7 +282,7 @@ iget(u32 dev, u32 inum)
} }
release(&victim->lock); release(&victim->lock);
ns_remove(ins, KII(victim->dev, victim->inum), victim); ns_remove(ins, KII(victim->dev, victim->inum), victim);
rcu_delayed(victim, ifree); gc_delayed(victim, ifree);
} else { } else {
if (!__sync_bool_compare_and_swap(&icache_free[mycpu()->id].x, cur_free, cur_free-1)) if (!__sync_bool_compare_and_swap(&icache_free[mycpu()->id].x, cur_free, cur_free-1))
goto retry_evict; goto retry_evict;
...@@ -299,7 +299,7 @@ iget(u32 dev, u32 inum) ...@@ -299,7 +299,7 @@ iget(u32 dev, u32 inum)
initcondvar(&ip->cv, ip->lockname); initcondvar(&ip->cv, ip->lockname);
ip->dir = 0; ip->dir = 0;
if (ns_insert(ins, KII(ip->dev, ip->inum), ip) < 0) { if (ns_insert(ins, KII(ip->dev, ip->inum), ip) < 0) {
rcu_delayed(ip, kmfree); gc_delayed(ip, kmfree);
goto retry; goto retry;
} }
...@@ -405,7 +405,7 @@ iput(struct inode *ip) ...@@ -405,7 +405,7 @@ iput(struct inode *ip)
iupdate(ip); iupdate(ip);
ns_remove(ins, KII(ip->dev, ip->inum), ip); ns_remove(ins, KII(ip->dev, ip->inum), ip);
rcu_delayed(ip, ifree); gc_delayed(ip, ifree);
__sync_fetch_and_add(&icache_free[mycpu()->id].x, 1); __sync_fetch_and_add(&icache_free[mycpu()->id].x, 1);
return; return;
} }
...@@ -473,7 +473,7 @@ itrunc(struct inode *ip) ...@@ -473,7 +473,7 @@ itrunc(struct inode *ip)
for(i = 0; i < NDIRECT; i++){ for(i = 0; i < NDIRECT; i++){
if(ip->addrs[i]){ if(ip->addrs[i]){
rcu_delayed2(ip->dev, ip->addrs[i], bfree); gc_delayed2(ip->dev, ip->addrs[i], bfree);
ip->addrs[i] = 0; ip->addrs[i] = 0;
} }
} }
...@@ -483,10 +483,10 @@ itrunc(struct inode *ip) ...@@ -483,10 +483,10 @@ itrunc(struct inode *ip)
a = (u32*)bp->data; a = (u32*)bp->data;
for(j = 0; j < NINDIRECT; j++){ for(j = 0; j < NINDIRECT; j++){
if(a[j]) if(a[j])
rcu_delayed2(ip->dev, a[j], bfree); gc_delayed2(ip->dev, a[j], bfree);
} }
brelse(bp, 0); brelse(bp, 0);
rcu_delayed2(ip->dev, ip->addrs[NDIRECT], bfree); gc_delayed2(ip->dev, ip->addrs[NDIRECT], bfree);
ip->addrs[NDIRECT] = 0; ip->addrs[NDIRECT] = 0;
} }
...@@ -713,7 +713,7 @@ namex(char *path, int nameiparent, char *name) ...@@ -713,7 +713,7 @@ namex(char *path, int nameiparent, char *name)
//cprintf("namex %s\n", path); //cprintf("namex %s\n", path);
rcu_begin_read(); gc_begin_epoch();
if(*path == '/') if(*path == '/')
ip = iget(ROOTDEV, ROOTINO); ip = iget(ROOTDEV, ROOTINO);
else else
...@@ -726,17 +726,17 @@ namex(char *path, int nameiparent, char *name) ...@@ -726,17 +726,17 @@ namex(char *path, int nameiparent, char *name)
panic("namex"); panic("namex");
if(ip->type != T_DIR){ if(ip->type != T_DIR){
iput(ip); iput(ip);
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
if(nameiparent && *path == '\0'){ if(nameiparent && *path == '\0'){
// Stop one level early. // Stop one level early.
rcu_end_read(); gc_end_epoch();
return ip; return ip;
} }
if((next = dirlookup(ip, name)) == 0){ if((next = dirlookup(ip, name)) == 0){
iput(ip); iput(ip);
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
iput(ip); iput(ip);
...@@ -745,10 +745,10 @@ namex(char *path, int nameiparent, char *name) ...@@ -745,10 +745,10 @@ namex(char *path, int nameiparent, char *name)
} }
if(nameiparent){ if(nameiparent){
iput(ip); iput(ip);
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
rcu_end_read(); gc_end_epoch();
return ip; return ip;
} }
......
#include "types.h"
#include "kernel.h"
#include "mmu.h"
#include "amd64.h"
#include "spinlock.h"
#include "condvar.h"
#include "queue.h"
#include "proc.h"
#include "cpu.h"
#include "kmtrace.h"
// Number of epochs tracked at once; an item retired in epoch e is freed when
// the global epoch reaches e + (NEPOCH-2), i.e. two generations later.
#define NEPOCH 4
// Per-CPU lock and condvar used to park and wake the per-CPU gc worker threads.
static struct { struct spinlock l __mpalign__; } rcu_lock[NCPU];
static struct { struct condvar cv __mpalign__; } rcu_cv[NCPU];
// One delayed-free request, linked onto the list of its retirement epoch.
struct gc {
u64 epoch;   // epoch in which the item was retired
struct gc *next;   // next request on the same epoch's list
union {
struct {
void (*dofree)(void *);
void *item;
} f1;   // type 1: free a single pointer (see gc_delayed)
struct {
void (*dofree)(int, u64);
int arg1;
u64 arg2;
} f2;   // type 2: free via an (int, u64) pair (see gc_delayed2)
};
int type;   // selects union arm: 1 = f1, 2 = f2
} __mpalign__;
// Per-epoch list heads, indexed by epoch % NEPOCH; .epoch records which
// generation currently owns the slot.
struct gc gc_epoch[NEPOCH] __mpalign__;
u64 global_epoch __mpalign__;   // current global epoch, advanced by gc_free_epoch
int ndelayed __mpalign__;   // sanity counter of outstanding delayed frees
enum { rcu_debug = 0 };
// Allocate an uninitialized delayed-free record; returns 0 on failure
// (callers panic on that).
struct gc *
gc_alloc()
{
  struct gc *r = kmalloc(sizeof(*r));
  return r;
}
// ns_enumerate callback over the pid namespace: fold each process's epoch
// into *arg, keeping the minimum. Always returns 0 so enumeration continues.
void *
gc_min(void *vkey, void *v, void *arg){
  struct proc *p = (struct proc *) v;
  u64 *min = arg;
  if (p->epoch < *min)
    *min = p->epoch;
  return 0;
}
// Execute one delayed-free request and then release the record itself.
// type 1 frees a single pointer; type 2 calls the two-argument variant
// (e.g. bfree(dev, block)); anything else is a corrupted record.
void
gc_free_elem(struct gc *r)
{
  switch (r->type) {
  case 1:
    r->f1.dofree(r->f1.item);
    break;
  case 2:
    r->f2.dofree(r->f2.arg1, r->f2.arg2);
    break;
  default:
    // was "rcu type": keep the diagnostic consistent with the rcu -> gc rename
    panic("gc type");
  }
  kmfree(r);
}
// Fraser's reclamation scheme: free all delayed-free items in global_epoch-2
// Advance global_epoch from `epoch` to epoch+1; exactly one core wins the CAS
// and that core frees everything retired in epoch-(NEPOCH-2), which no reader
// can still be inside.
static void
gc_free_epoch(u64 epoch)
{
if (__sync_bool_compare_and_swap(&global_epoch, epoch, epoch+1)) {
// only one core succeeds; that core in charge of freeing epoch
struct gc *head;
struct gc *r, *nr;
// slot holding the epoch that is now two generations old
uint32 fe = (epoch - (NEPOCH-2)) % NEPOCH;
int cas;
if (gc_epoch[fe].epoch != epoch - (NEPOCH-2))
panic("gc_free_epoch");
// unhook list for fe epoch atomically
head = gc_epoch[fe].next;
// this shouldn't fail, because no core is modifying it.
cas = __sync_bool_compare_and_swap(&gc_epoch[fe].next, head, 0);
if (!cas) panic("gc_free_epoch");
// free list items on the delayed list
for (r = head; r != NULL; r = nr) {
// every item on this list must have been retired in the epoch being freed
if (r->epoch > epoch-(NEPOCH-2)) {
cprintf("%lu %lu\n", r->epoch, epoch-(NEPOCH-2));
panic("gc_free_epoch");
}
nr = r->next;
gc_free_elem(r);
// ndelayed going negative means more frees than retirements
int x = __sync_fetch_and_sub(&ndelayed, 1);
if (x < 0) panic("gc_free_epoch");
}
if (gc_epoch[fe].next != 0)
panic("gc_free_epoch");
// recycle the slot for the epoch NEPOCH generations later
gc_epoch[fe].epoch = gc_epoch[fe].epoch + NEPOCH;
}
}
// One garbage-collection pass: if no process is still reading inside an
// epoch older than the current global epoch, try to retire it.
void
gc(void)
{
  u64 cur = global_epoch;
  u64 oldest = cur;
  // Compute the minimum epoch any process is currently in.
  ns_enumerate(nspid, gc_min, &oldest);
  if (oldest >= cur)
    gc_free_epoch(oldest);
}
// Per-CPU garbage-collection worker thread: runs gc() each time gc_start()
// wakes it, then sleeps on this CPU's condvar.
void
gc_worker(void)
{
  release(&myproc()->lock); // initially held by scheduler
  // Fix: this commit renamed rcu_gc_worker to gc_worker (see kernel.h and
  // proc.c), so the old symbol no longer exists; trace under the new name.
  mtstart(gc_worker, myproc());
  struct spinlock wl;
  initlock(&wl, "rcu_gc_worker"); // dummy lock; only needed to satisfy cv_sleep
  for (;;) {
    gc();
    acquire(&wl);
    cv_sleep(&rcu_cv[mycpu()->id].cv, &wl);
    release(&wl);
  }
}
// Wake this CPU's gc worker; called periodically from the scheduler.
void
gc_start(void)
{
  struct condvar *cv = &rcu_cv[mycpu()->id].cv;
  cv_wakeup(cv);
}
static void
gc_delayed_int(struct gc *r)
{
pushcli();
u64 myepoch = myproc()->epoch;
u64 minepoch = gc_epoch[myepoch % NEPOCH].epoch;
// cprintf("%d: gc_delayed: %lu ndelayed %d\n", myproc()->pid, global_epoch, ndelayed);
if (myepoch != minepoch) {
cprintf("%d: myepoch %lu minepoch %lu\n", myproc()->pid, myepoch, minepoch);
panic("gc_delayed_int");
}
r->epoch = myepoch;
do {
r->next = gc_epoch[myepoch % NEPOCH].next;
} while (!__sync_bool_compare_and_swap(&(gc_epoch[myepoch % NEPOCH].next), r->next, r));
popcli();
}
// Schedule dofree(e) to run once no reader can still hold a reference,
// i.e. when the current epoch is retired by gc_free_epoch.
void
gc_delayed(void *e, void (*dofree)(void *))
{
  struct gc *r = gc_alloc();
  if (!r)
    panic("gc_delayed");
  r->type = 1;
  r->f1.item = e;
  r->f1.dofree = dofree;
  gc_delayed_int(r);
}
// Two-argument variant of gc_delayed: schedule dofree(a1, a2) for when the
// current epoch is retired (used e.g. for bfree(dev, block)).
void
gc_delayed2(int a1, u64 a2, void (*dofree)(int,u64))
{
  struct gc *r = gc_alloc();
  if (!r)
    panic("gc_delayed2");
  r->type = 2;
  r->f2.arg1 = a1;
  r->f2.arg2 = a2;
  r->f2.dofree = dofree;
  gc_delayed_int(r);
}
void
gc_begin_epoch(void)
{
if (myproc() && myproc()->rcu_read_depth++ == 0)
myproc()->epoch = global_epoch;
__sync_synchronize();
}
void
gc_end_epoch(void)
{
if (myproc() && myproc()->rcu_read_depth > 0)
myproc()->rcu_read_depth--;
}
// Initialize the gc module: per-CPU locks/condvars, the starting global
// epoch, and the generation number owned by each per-epoch slot.
void
initgc(void)
{
  int i;
  for (i = 0; i < NCPU; i++) {
    initlock(&rcu_lock[i].l, "rcu");
    initcondvar(&rcu_cv[i].cv, "rcu_gc_cv");
  }
  // Start at NEPOCH-2 so the first gc_free_epoch frees slot 0.
  global_epoch = NEPOCH-2;
  for (i = 0; i < NEPOCH; i++)
    gc_epoch[i].epoch = i;
}
...@@ -213,15 +213,13 @@ void profreset(void); ...@@ -213,15 +213,13 @@ void profreset(void);
void profdump(void); void profdump(void);
// rcu.c // rcu.c
void rcuinit(void); void initgc(void);
void rcu_begin_write(struct spinlock *); void gc_begin_epoch();
void rcu_end_write(struct spinlock *); void gc_end_epoch();
void rcu_begin_read(void); void gc_delayed(void*, void (*dofree)(void*));
void rcu_end_read(void); void gc_delayed2(int, u64, void (*dofree)(int, u64));
void rcu_delayed(void*, void (*dofree)(void*)); void gc_start(void);
void rcu_delayed2(int, u64, void (*dofree)(int, u64)); void gc_worker(void);
void rcu_gc(void);
void rcu_gc_worker(void);
// sampler.c // sampler.c
void sampstart(void); void sampstart(void);
......
...@@ -93,7 +93,7 @@ cmain(void) ...@@ -93,7 +93,7 @@ cmain(void)
initlapic(); initlapic();
initkalloc(); initkalloc();
initrcu(); // initialize rcu module initgc(); // initialize rcu module
initproc(); // process table initproc(); // process table
initbio(); // buffer cache initbio(); // buffer cache
initinode(); // inode cache initinode(); // inode cache
......
...@@ -74,7 +74,7 @@ nsfree(struct ns *ns) ...@@ -74,7 +74,7 @@ nsfree(struct ns *ns)
{ {
if (ns_enumerate(ns, &any, 0)) if (ns_enumerate(ns, &any, 0))
panic("nsfree: not empty"); panic("nsfree: not empty");
rcu_delayed(ns, kmfree); gc_delayed(ns, kmfree);
} }
static struct elem * static struct elem *
...@@ -192,7 +192,7 @@ ns_insert(struct ns *ns, struct nskey key, void *val) ...@@ -192,7 +192,7 @@ ns_insert(struct ns *ns, struct nskey key, void *val)
setkey(e, &key); setkey(e, &key);
e->val = val; e->val = val;
u64 i = h(&key); u64 i = h(&key);
rcu_begin_write(0); gc_begin_epoch();
retry: retry:
(void) 0; (void) 0;
...@@ -200,8 +200,8 @@ ns_insert(struct ns *ns, struct nskey key, void *val) ...@@ -200,8 +200,8 @@ ns_insert(struct ns *ns, struct nskey key, void *val)
if (!ns->allowdup) { if (!ns->allowdup) {
for (struct elem *x = root; x; x = x->next) { for (struct elem *x = root; x; x = x->next) {
if (cmpkey(x, &key)) { if (cmpkey(x, &key)) {
rcu_end_write(0); gc_end_epoch();
rcu_delayed(e, kmfree); gc_delayed(e, kmfree);
return -1; return -1;
} }
} }
...@@ -211,7 +211,7 @@ ns_insert(struct ns *ns, struct nskey key, void *val) ...@@ -211,7 +211,7 @@ ns_insert(struct ns *ns, struct nskey key, void *val)
if (!__sync_bool_compare_and_swap(&ns->table[i].chain, root, e)) if (!__sync_bool_compare_and_swap(&ns->table[i].chain, root, e))
goto retry; goto retry;
rcu_end_write(0); gc_end_epoch();
return 0; return 0;
} }
return -1; return -1;
...@@ -222,17 +222,17 @@ ns_lookup(struct ns *ns, struct nskey key) ...@@ -222,17 +222,17 @@ ns_lookup(struct ns *ns, struct nskey key)
{ {
u64 i = h(&key); u64 i = h(&key);
rcu_begin_read(); gc_begin_epoch();
struct elem *e = ns->table[i].chain; struct elem *e = ns->table[i].chain;
while (e != NULL) { while (e != NULL) {
if (cmpkey(e, &key)) { if (cmpkey(e, &key)) {
rcu_end_read(); gc_end_epoch();
return e->val; return e->val;
} }
e = e->next; e = e->next;
} }
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
...@@ -241,7 +241,7 @@ void* ...@@ -241,7 +241,7 @@ void*
ns_remove(struct ns *ns, struct nskey key, void *v) ns_remove(struct ns *ns, struct nskey key, void *v)
{ {
u64 i = h(&key); u64 i = h(&key);
rcu_begin_write(0); gc_begin_epoch();
retry: retry:
(void) 0; (void) 0;
...@@ -270,34 +270,34 @@ ns_remove(struct ns *ns, struct nskey key, void *v) ...@@ -270,34 +270,34 @@ ns_remove(struct ns *ns, struct nskey key, void *v)
*pelock = 0; *pelock = 0;
void *v = e->val; void *v = e->val;
rcu_end_write(0); gc_end_epoch();
rcu_delayed(e, kmfree); gc_delayed(e, kmfree);
return v; return v;
} }
pe = &e->next; pe = &e->next;
} }
rcu_end_write(0); gc_end_epoch();
return 0; return 0;
} }
void * void *
ns_enumerate(struct ns *ns, void *(*f)(void *, void *, void *), void *arg) ns_enumerate(struct ns *ns, void *(*f)(void *, void *, void *), void *arg)
{ {
rcu_begin_read(); gc_begin_epoch();
for (int i = 0; i < NHASH; i++) { for (int i = 0; i < NHASH; i++) {
struct elem *e = ns->table[i].chain; struct elem *e = ns->table[i].chain;
while (e != NULL) { while (e != NULL) {
void *r = (*f)(&e->ikey, e->val, arg); void *r = (*f)(&e->ikey, e->val, arg);
if (r) { if (r) {
rcu_end_read(); gc_end_epoch();
return r; return r;
} }
e = e->next; e = e->next;
} }
} }
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
...@@ -305,18 +305,18 @@ void * ...@@ -305,18 +305,18 @@ void *
ns_enumerate_key(struct ns *ns, struct nskey key, void *(*f)(void *, void *), void *arg) ns_enumerate_key(struct ns *ns, struct nskey key, void *(*f)(void *, void *), void *arg)
{ {
u64 i = h(&key); u64 i = h(&key);
rcu_begin_read(); gc_begin_epoch();
struct elem *e = ns->table[i].chain; struct elem *e = ns->table[i].chain;
while (e) { while (e) {
if (cmpkey(e, &key)) { if (cmpkey(e, &key)) {
void *r = (*f)(e->val, arg); void *r = (*f)(e->val, arg);
if (r) { if (r) {
rcu_end_read(); gc_end_epoch();
return r; return r;
} }
} }
e = e->next; e = e->next;
} }
rcu_end_read(); gc_end_epoch();
return 0; return 0;
} }
...@@ -184,7 +184,7 @@ allocproc(void) ...@@ -184,7 +184,7 @@ allocproc(void)
p->state = EMBRYO; p->state = EMBRYO;
p->pid = ns_allockey(nspid); p->pid = ns_allockey(nspid);
p->epoch = INF; p->epoch = 0;
p->cpuid = mycpu()->id; p->cpuid = mycpu()->id;
p->on_runq = -1; p->on_runq = -1;
p->cpu_pin = 0; p->cpu_pin = 0;
...@@ -204,7 +204,7 @@ allocproc(void) ...@@ -204,7 +204,7 @@ allocproc(void)
if((p->kstack = ksalloc()) == 0){ if((p->kstack = ksalloc()) == 0){
if (ns_remove(nspid, KI(p->pid), p) == 0) if (ns_remove(nspid, KI(p->pid), p) == 0)
panic("allocproc: ns_remove"); panic("allocproc: ns_remove");
rcu_delayed(p, kmfree); gc_delayed(p, kmfree);
return 0; return 0;
} }
sp = p->kstack + KSTACKSIZE; sp = p->kstack + KSTACKSIZE;
...@@ -269,7 +269,7 @@ inituser(void) ...@@ -269,7 +269,7 @@ inituser(void)
for (u32 c = 0; c < NCPU; c++) { for (u32 c = 0; c < NCPU; c++) {
struct proc *rcup = allocproc(); struct proc *rcup = allocproc();
rcup->vmap = vmap_alloc(); rcup->vmap = vmap_alloc();
rcup->context->rip = (u64) rcu_gc_worker; rcup->context->rip = (u64) gc_worker;
rcup->cwd = 0; rcup->cwd = 0;
rcup->cpuid = c; rcup->cpuid = c;
rcup->cpu_pin = 1; rcup->cpu_pin = 1;
...@@ -346,7 +346,7 @@ scheduler(void) ...@@ -346,7 +346,7 @@ scheduler(void)
mtpause(schedp); mtpause(schedp);
if (p->context->rip != (uptr)forkret && if (p->context->rip != (uptr)forkret &&
p->context->rip != (uptr)rcu_gc_worker) p->context->rip != (uptr)gc_worker)
{ {
mtresume(p); mtresume(p);
} }
...@@ -376,7 +376,7 @@ scheduler(void) ...@@ -376,7 +376,7 @@ scheduler(void)
int now = ticks; int now = ticks;
if (now - mycpu()->last_rcu_gc_ticks > 100) { if (now - mycpu()->last_rcu_gc_ticks > 100) {
rcu_gc(); gc_start();
mycpu()->last_rcu_gc_ticks = now; mycpu()->last_rcu_gc_ticks = now;
} }
...@@ -416,7 +416,7 @@ growproc(int n) ...@@ -416,7 +416,7 @@ growproc(int n)
// find first unallocated address in brk..brk+n // find first unallocated address in brk..brk+n
uptr newstart = myproc()->brk; uptr newstart = myproc()->brk;
u64 newn = n; u64 newn = n;
rcu_begin_read(); gc_begin_epoch();
while(newn > 0){ while(newn > 0){
struct vma *e = vmap_lookup(m, newstart, 1); struct vma *e = vmap_lookup(m, newstart, 1);
if(e == 0) if(e == 0)
...@@ -429,7 +429,7 @@ growproc(int n) ...@@ -429,7 +429,7 @@ growproc(int n)
newn -= e->va_end - newstart; newn -= e->va_end - newstart;
newstart = e->va_end; newstart = e->va_end;
} }
rcu_end_read(); gc_end_epoch();
if(newn <= 0){ if(newn <= 0){
// no need to allocate // no need to allocate
...@@ -573,7 +573,7 @@ fork(int flags) ...@@ -573,7 +573,7 @@ fork(int flags)
np->state = UNUSED; np->state = UNUSED;
if (ns_remove(nspid, KI(np->pid), np) == 0) if (ns_remove(nspid, KI(np->pid), np) == 0)
panic("fork: ns_remove"); panic("fork: ns_remove");
rcu_delayed(np, kmfree); gc_delayed(np, kmfree);
return -1; return -1;
} }
} else { } else {
...@@ -639,7 +639,7 @@ wait(void) ...@@ -639,7 +639,7 @@ wait(void)
p->parent = 0; p->parent = 0;
p->name[0] = 0; p->name[0] = 0;
p->killed = 0; p->killed = 0;
rcu_delayed(p, kmfree); gc_delayed(p, kmfree);
return pid; return pid;
} }
release(&p->lock); release(&p->lock);
......
...@@ -197,10 +197,10 @@ pagefault(struct vmap *vmap, uptr va, u32 err) ...@@ -197,10 +197,10 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W)) if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W))
return 0; return 0;
rcu_begin_read(); gc_begin_epoch();
struct vma *m = vmap_lookup(vmap, va, 1); struct vma *m = vmap_lookup(vmap, va, 1);
if (m == 0) { if (m == 0) {
rcu_end_read(); gc_end_epoch();
return -1; return -1;
} }
...@@ -213,7 +213,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err) ...@@ -213,7 +213,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
if (m->va_type == COW && (err & FEC_WR)) { if (m->va_type == COW && (err & FEC_WR)) {
if (pagefault_wcow(vmap, va, pte, m, npg) < 0) { if (pagefault_wcow(vmap, va, pte, m, npg) < 0) {
release(&m->lock); release(&m->lock);
rcu_end_read(); gc_end_epoch();
return -1; return -1;
} }
} else if (m->va_type == COW) { } else if (m->va_type == COW) {
...@@ -227,7 +227,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err) ...@@ -227,7 +227,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
// XXX(sbw) Why reload hardware page tables? // XXX(sbw) Why reload hardware page tables?
lcr3(v2p(vmap->pml4)); // Reload hardware page tables lcr3(v2p(vmap->pml4)); // Reload hardware page tables
release(&m->lock); release(&m->lock);
rcu_end_read(); gc_end_epoch();
return 1; return 1;
} }
...@@ -255,10 +255,10 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len) ...@@ -255,10 +255,10 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len)
char *buf = (char*)p; char *buf = (char*)p;
while(len > 0){ while(len > 0){
uptr va0 = (uptr)PGROUNDDOWN(va); uptr va0 = (uptr)PGROUNDDOWN(va);
rcu_begin_read(); gc_begin_epoch();
struct vma *vma = vmap_lookup(vmap, va, 1); struct vma *vma = vmap_lookup(vmap, va, 1);
if(vma == 0) { if(vma == 0) {
rcu_end_read(); gc_end_epoch();
return -1; return -1;
} }
...@@ -275,7 +275,7 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len) ...@@ -275,7 +275,7 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len)
buf += n; buf += n;
va = va0 + PGSIZE; va = va0 + PGSIZE;
release(&vma->lock); release(&vma->lock);
rcu_end_read(); gc_end_epoch();
} }
return 0; return 0;
} }
...@@ -457,7 +457,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len) ...@@ -457,7 +457,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len)
return -1; return -1;
} }
m->root = tree_remove(m->root, va_start+len); m->root = tree_remove(m->root, va_start+len);
rcu_delayed(e, vma_free); gc_delayed(e, vma_free);
release(&m->lock); release(&m->lock);
return 0; return 0;
} }
...@@ -589,7 +589,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len) ...@@ -589,7 +589,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len)
cprintf("vmap_remove: partial unmap unsupported\n"); cprintf("vmap_remove: partial unmap unsupported\n");
return -1; return -1;
} }
rcu_delayed(m->e[i], vma_free); gc_delayed(m->e[i], vma_free);
m->e[i] = 0; m->e[i] = 0;
} }
} }
......
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论