Commit 1d1e1ede authored by Nickolai Zeldovich

ns_enumerate callback arg

Parent 880d3e09
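The change threads an opaque void *arg through ns_enumerate and ns_enumerate_key into every callback, so per-call state (here, rcu_gc's running minimum epoch) can live on the caller's stack instead of in the per-CPU min_epoch array that this commit deletes. Below is a minimal, standalone sketch of that calling convention, not kernel code: the item struct, walk(), and min_epoch_cb() names are hypothetical stand-ins for the namespace table, ns_enumerate, and rcu_min.

#include <stdio.h>

// Hypothetical stand-in for a namespace entry.
struct item { int key; unsigned epoch; };

// Enumerate items, passing the caller-supplied arg through to the
// callback; a non-NULL return stops the walk early, as in ns_enumerate.
static void *
walk(struct item *items, int n,
     void *(*f)(void *key, void *val, void *arg), void *arg)
{
  for (int i = 0; i < n; i++) {
    void *r = f(&items[i].key, &items[i], arg);
    if (r)
      return r;
  }
  return 0;
}

// Callback in the style of the new rcu_min(): fold each item's epoch into
// a minimum that the caller keeps in a local variable reached through arg.
static void *
min_epoch_cb(void *key, void *val, void *arg)
{
  unsigned *min_epoch_p = arg;
  struct item *it = val;
  if (*min_epoch_p > it->epoch)
    *min_epoch_p = it->epoch;
  return 0;              // never stop early; visit every item
}

int
main(void)
{
  struct item items[] = { { 1, 7 }, { 2, 3 }, { 3, 9 } };
  unsigned min_epoch = 100;                  // plays the role of global_epoch
  walk(items, 3, min_epoch_cb, &min_epoch);  // state travels via arg, not a global
  printf("min epoch %u\n", min_epoch);       // prints "min epoch 3"
  return 0;
}

Call sites that need no extra state simply pass 0 for arg, as the updated ns_enumerate(bufns, evict, 0) and ns_enumerate(nspid, procdump, 0) calls in the hunks below do.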
@@ -50,7 +50,7 @@ binit(void)
 }
 
 static void *
-evict(void *vkey, void *bp)
+evict(void *vkey, void *bp, void *arg)
 {
   struct buf *b = bp;
   acquire(&b->lock);
@@ -61,7 +61,7 @@ evict(void *vkey, void *bp)
 }
 
 static void *
-evict_valid(void *vkey, void *bp)
+evict_valid(void *vkey, void *bp, void *arg)
 {
   struct buf *b = bp;
   acquire(&b->lock);
@@ -107,9 +107,9 @@ bget(uint dev, uint sector, int *writer)
   rcu_end_read();
 
   // Allocate fresh block.
-  struct buf *victim = ns_enumerate(bufns, evict);
+  struct buf *victim = ns_enumerate(bufns, evict, 0);
   if (victim == 0)
-    victim = ns_enumerate(bufns, evict_valid);
+    victim = ns_enumerate(bufns, evict_valid, 0);
   if (victim == 0)
     panic("bget all busy");
   victim->flags |= B_BUSY;
...
@@ -141,8 +141,8 @@ int ns_allockey(struct ns*);
 int ns_insert(struct ns*, struct nskey key, void*);
 void* ns_lookup(struct ns*, struct nskey key);
 void* ns_remove(struct ns *ns, struct nskey key, void *val); // removed val
-void* ns_enumerate(struct ns *ns, void *(*f)(void *, void *));
-void* ns_enumerate_key(struct ns *ns, struct nskey key, void *(*f)(void *));
+void* ns_enumerate(struct ns *ns, void *(*f)(void *, void *, void *), void *arg);
+void* ns_enumerate_key(struct ns *ns, struct nskey key, void *(*f)(void *, void *), void *arg);
 
 // picirq.c
 void picenable(int);
...
@@ -205,7 +205,7 @@ iupdate(struct inode *ip)
 }
 
 static void *
-evict(void *vkey, void *p)
+evict(void *vkey, void *p, void *arg)
 {
   struct inode *ip = p;
   if (ip->ref || ip->type == T_DIR)
@@ -271,7 +271,7 @@ iget(uint dev, uint inum)
   // Allocate fresh inode cache slot.
 retry_evict:
   (void) 0;
-  struct inode *victim = ns_enumerate(ins, evict);
+  struct inode *victim = ns_enumerate(ins, evict, 0);
   if (!victim)
     panic("iget out of space");
   // tricky: first flag as free, then check refcnt, then remove from ns
@@ -587,6 +587,17 @@ dir_init(struct inode *dp)
 }
 }
 
+void
+dir_flush(struct inode *dp)
+{
+  if (!dp->dir)
+    return;
+
+  ilock(dp, 1);
+  iunlock(dp);
+}
+
 // Look for a directory entry in a directory.
 struct inode*
 dirlookup(struct inode *dp, char *name)
...
@@ -65,7 +65,7 @@ nsalloc(int allowdup)
 }
 
 static void *
-any(void *x, void *y)
+any(void *x, void *y, void *arg)
 {
   return (void*) 1;
 }
@@ -73,7 +73,7 @@ any(void *x, void *y)
 void
 nsfree(struct ns *ns)
 {
-  if (ns_enumerate(ns, &any))
+  if (ns_enumerate(ns, &any, 0))
     panic("nsfree: not empty");
   rcu_delayed(ns, kmfree);
 }
@@ -284,13 +284,13 @@ ns_remove(struct ns *ns, struct nskey key, void *v)
 }
 
 void *
-ns_enumerate(struct ns *ns, void *(*f)(void *, void *))
+ns_enumerate(struct ns *ns, void *(*f)(void *, void *, void *), void *arg)
 {
   rcu_begin_read();
   for (int i = 0; i < NHASH; i++) {
     struct elem *e = ns->table[i].chain;
     while (e != NULL) {
-      void *r = (*f)(&e->ikey, e->val);
+      void *r = (*f)(&e->ikey, e->val, arg);
       if (r) {
         rcu_end_read();
         return r;
@@ -303,14 +303,14 @@ ns_enumerate(struct ns *ns, void *(*f)(void *, void *))
 }
 
 void *
-ns_enumerate_key(struct ns *ns, struct nskey key, void *(*f)(void *))
+ns_enumerate_key(struct ns *ns, struct nskey key, void *(*f)(void *, void *), void *arg)
 {
   uint i = h(&key);
   rcu_begin_read();
   struct elem *e = ns->table[i].chain;
   while (e) {
     if (cmpkey(e, &key)) {
-      void *r = (*f)(e->val);
+      void *r = (*f)(e->val, arg);
       if (r) {
         rcu_end_read();
         return r;
...
@@ -445,7 +445,7 @@ migrate(struct proc *p)
 }
 
 static void *
-steal_cb(void *vk, void *v)
+steal_cb(void *vk, void *v, void *arg)
 {
   struct proc *p = v;
 
@@ -474,7 +474,7 @@ steal_cb(void *vk, void *v)
 int
 steal(void)
 {
-  void *stole = ns_enumerate(nsrunq, steal_cb);
+  void *stole = ns_enumerate(nsrunq, steal_cb, 0);
   return stole ? 1 : 0;
 }
@@ -488,7 +488,7 @@ steal(void)
 // via swtch back to the scheduler.
 static void *
-choose_runnable(void *pp)
+choose_runnable(void *pp, void *arg)
 {
   struct proc *p = pp;
   if (p->state == RUNNABLE)
@@ -514,7 +514,7 @@ scheduler(void)
     // Enable interrupts on this processor.
     sti();
 
-    struct proc *p = ns_enumerate_key(nsrunq, KI(cpu->id), choose_runnable);
+    struct proc *p = ns_enumerate_key(nsrunq, KI(cpu->id), choose_runnable, 0);
     if (p) {
       acquire(&p->lock);
       if (p->state != RUNNABLE) {
@@ -653,7 +653,7 @@ kill(int pid)
   return 0;
 }
 
-void *procdump(void *vk, void *v)
+void *procdump(void *vk, void *v, void *arg)
 {
   struct proc *p = (struct proc *) v;
@@ -690,5 +690,5 @@ void *procdump(void *vk, void *v)
 void
 procdumpall(void)
 {
-  ns_enumerate(nspid, procdump);
+  ns_enumerate(nspid, procdump, 0);
 }
@@ -30,7 +30,6 @@ TAILQ_HEAD(rcu_head, rcu);
 static struct { struct rcu_head x __attribute__((aligned (CACHELINE))); } rcu_q[NCPU];
 static uint global_epoch __attribute__ ((aligned (CACHELINE)));
-static struct { uint x __attribute__((aligned (CACHELINE))); } min_epoch[NCPU];
 static struct { struct spinlock l __attribute__((aligned (CACHELINE))); } rcu_lock[NCPU];
 static struct { int v __attribute__((aligned (CACHELINE))); } delayed_nfree[NCPU];
@@ -52,10 +51,11 @@ rcu_alloc()
 }
 
 void *
-rcu_min(void *vkey, void *v){
+rcu_min(void *vkey, void *v, void *arg){
+  uint *min_epoch_p = arg;
   struct proc *p = (struct proc *) v;
-  if (min_epoch[cpu->id].x > p->epoch) {
-    min_epoch[cpu->id].x = p->epoch;
+  if (*min_epoch_p > p->epoch) {
+    *min_epoch_p = p->epoch;
   }
   return 0;
 }
@@ -66,16 +66,16 @@ void
 rcu_gc(void)
 {
   struct rcu *r, *nr;
+  uint min_epoch = global_epoch;
   int n = 0;
 
-  min_epoch[cpu->id].x = global_epoch;
-  ns_enumerate(nspid, rcu_min);
+  ns_enumerate(nspid, rcu_min, &min_epoch);
 
   pushcli();
   acquire(&rcu_lock[cpu->id].l);
 
   for (r = TAILQ_FIRST(&rcu_q[cpu->id].x); r != NULL; r = nr) {
     if (r->epoch >= min_epoch)
       break;
 
     release(&rcu_lock[cpu->id].l);
@@ -101,7 +101,7 @@ rcu_gc(void)
   release(&rcu_lock[cpu->id].l);
   if (rcu_debug)
     cprintf("rcu_gc: cpu %d n %d delayed_nfree=%d min_epoch=%d\n",
-            cpu->id, n, delayed_nfree[cpu->id], min_epoch[cpu->id]);
+            cpu->id, n, delayed_nfree[cpu->id], min_epoch);
   popcli();
 
   // global_epoch can be bumped anywhere; this seems as good a place as any
...
@@ -147,7 +147,7 @@ bad:
 
 // Is the directory dp empty except for "." and ".." ?
 static void*
-check_empty(void *k, void *v)
+check_empty(void *k, void *v, void *arg)
 {
   char *name = k;
   if (strcmp(name, ".") && strcmp(name, ".."))
@@ -159,7 +159,7 @@ static int
 isdirempty(struct inode *dp)
 {
   dir_init(dp);
-  if (ns_enumerate(dp->dir, check_empty))
+  if (ns_enumerate(dp->dir, check_empty, 0))
     return 0;
   return 1;
 }
...