Commit 822d634b authored by Nickolai Zeldovich

use namespaces for scheduler

Parent 6b7ca768
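This commit replaces the per-CPU struct runq run queues with a single namespace, nsrunq, keyed by CPU id: addrun()/delrun() become ns_insert()/ns_remove() on that namespace, the scheduler picks local work with ns_lookup(), and steal() walks other CPUs' entries through the new value-returning ns_enumerate(). A minimal sketch of that shape, using only calls declared in the diff below (example_enqueue and example_pick are hypothetical helpers, not part of the commit):

// Assumed context: the ns_* prototypes from defs.h as changed by this commit.
struct ns;
struct proc;
int   ns_insert(struct ns *, int key, void *);
void *ns_lookup(struct ns *, int key);

extern struct ns *nsrunq;            // one namespace shared by all CPUs

// example_enqueue mirrors the new addrun(): "enqueueing" a process is just
// inserting it under its CPU's key.
static void
example_enqueue(struct proc *p, int cpuid)
{
  ns_insert(nsrunq, cpuid, p);
}

// example_pick mirrors the scheduler(): any process filed under this CPU's
// key is a candidate to run; NULL means the local queue is empty.
static struct proc *
example_pick(int cpuid)
{
  return ns_lookup(nsrunq, cpuid);
}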
@@ -62,6 +62,7 @@ cv_wakeup(struct condvar *cv)
     p->cv_next = 0;
     p->oncv = 0;
     addrun(p);
+    p->state = RUNNABLE;
     release(&p->lock);
     cv->waiters = nxt;
   }
@@ -105,8 +105,8 @@ int ns_allockey(struct ns*);
 int             ns_insert(struct ns*, int key, void*);
 void*           ns_lookup(struct ns*, int);
 int             ns_remove(struct ns *ns, int key, void *val);
-void            ns_enumerate(struct ns *ns, void (*f)(int, void *));
-void            ns_enumerate_key(struct ns *ns, int key, void (*f)(void *));
+void*           ns_enumerate(struct ns *ns, void *(*f)(int, void *));
+void*           ns_enumerate_key(struct ns *ns, int key, void *(*f)(void *));

 // picirq.c

@@ -134,7 +134,7 @@ void sched(void);
 void            userinit(void);
 int             wait(void);
 void            yield(void);
-void            migrate(void);
+void            migrate(struct proc *);

 // rcu.c
 void            rcuinit(void);
@@ -131,7 +131,7 @@ exec(char *path, char **argv)
   switchuvm(proc);
   vmap_decref(oldvmap);

-  migrate();
+  migrate(proc);

   return 0;
@@ -101,7 +101,7 @@ ns_lookup(struct ns *ns, int key)
   while (e != NULL) {
     if (e->key == key) {
-      return e;
+      return e->val;
     }
     e = e->next;
   }
@@ -138,31 +138,38 @@ ns_remove(struct ns *ns, int key, void *v)
   return -1;
 }

-void
-ns_enumerate(struct ns *ns, void (*f)(int, void *))
+void *
+ns_enumerate(struct ns *ns, void *(*f)(int, void *))
 {
   rcu_begin_read();
   for (int i = 0; i < NHASH; i++) {
     struct elem *e = ns->table[i].chain;
     while (e != NULL) {
-      (*f)(e->key, e->val);
+      void *r = (*f)(e->key, e->val);
+      if (r)
+        return r;
       e = e->next;
     }
   }
   rcu_end_read();
+  return 0;
 }

-void
-ns_enumerate_key(struct ns *ns, int key, void (*f)(void *))
+void *
+ns_enumerate_key(struct ns *ns, int key, void *(*f)(void *))
 {
   uint i = key % NHASH;
   rcu_begin_read();
   struct elem *e = ns->table[i].chain;
   while (e) {
-    if (e->key == key)
-      (*f)(e->val);
+    if (e->key == key) {
+      void *r = (*f)(e->val);
+      if (r)
+        return r;
+    }
     e = e->next;
   }
   rcu_end_read();
+  return 0;
 }
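Under the new signatures the callback can stop the walk: a non-NULL return value aborts the enumeration and is handed back to the caller, while 0 means keep going (note that the early-return path above does not call rcu_end_read()). A small sketch of the contract; first_runnable_cb is a hypothetical callback, not code in this commit, and it assumes proc.h/defs.h for struct proc, RUNNABLE, and ns_enumerate():

// Return the first RUNNABLE process found anywhere in a namespace, or 0 to
// let the enumeration continue to the next element.
static void *
first_runnable_cb(int key, void *v)
{
  struct proc *p = v;
  return (p->state == RUNNABLE) ? p : 0;
}

// Caller side: ns_enumerate() returns whatever the callback returned.
//   struct proc *p = ns_enumerate(nsrunq, first_runnable_cb);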
@@ -10,9 +10,9 @@
 #include "proc.h"
 #include "xv6-mtrace.h"

-struct runq runqs[NCPU];
 int __attribute__ ((aligned (CACHELINE))) idle[NCPU];
 struct ns *nspid __attribute__ ((aligned (CACHELINE)));
+struct ns *nsrunq __attribute__ ((aligned (CACHELINE)));
 static struct proc *initproc __attribute__ ((aligned (CACHELINE)));

 extern void forkret(void);
@@ -27,13 +27,12 @@ pinit(void)
   if (nspid == 0)
     panic("pinit");

-  for (c = 0; c < NCPU; c++) {
+  nsrunq = nsalloc();
+  if (nsrunq == 0)
+    panic("pinit runq");
+
+  for (c = 0; c < NCPU; c++)
     idle[c] = 1;
-    runqs[c].name[0] = (char) (c + '0');
-    safestrcpy(runqs[c].name+1, "runq", MAXNAME-1);
-    initlock(&runqs[c].lock, runqs[c].name);
-    STAILQ_INIT(&runqs[c].runq);
-  }
 }

 //PAGEBREAK: 32
@@ -56,6 +55,7 @@ allocproc(void)
   p->pid = ns_allockey(nspid);
   p->epoch = INF;
   p->cpuid = cpu->id;
+  p->on_runq = -1;
   snprintf(p->lockname, sizeof(p->lockname), "proc:%d", p->pid);
   initlock(&p->lock, p->lockname);
@@ -89,17 +89,6 @@ allocproc(void)
   return p;
 }

-static void
-addrun1(struct runq *rq, struct proc *p)
-{
-  struct proc *q;
-  STAILQ_FOREACH(q, &rq->runq, run_next)
-    if (q == p)
-      panic("addrun1: already on queue");
-  p->state = RUNNABLE;
-  STAILQ_INSERT_TAIL(&rq->runq, p, run_next);
-}
-
 // Mark a process RUNNABLE and add it to the runq
 // of its cpu. Caller must hold p->lock so that
 // some other core doesn't start running the
@@ -114,23 +103,11 @@ addrun(struct proc *p)
   if(!holding(&p->lock))
     panic("addrun no p->lock");
 #endif
-  acquire(&runqs[p->cpuid].lock);
-  // cprintf("%d: addrun %d\n", cpu->id, p->pid);
-  addrun1(&runqs[p->cpuid], p);
-  release(&runqs[p->cpuid].lock);
-}
-
-static void
-delrun1(struct runq *rq, struct proc *p)
-{
-  struct proc *q, *nq;
-  STAILQ_FOREACH_SAFE(q, &rq->runq, run_next, nq) {
-    if (q == p) {
-      STAILQ_REMOVE(&rq->runq, q, proc, run_next);
-      return;
-    }
-  }
-  panic("delrun1: not on runq");
+
+  if (p->on_runq >= 0)
+    panic("addrun on runq already");
+  ns_insert(nsrunq, p->cpuid, p);
+  p->on_runq = p->cpuid;
 }

 void
@@ -140,10 +117,11 @@ delrun(struct proc *p)
   if(!holding(&p->lock))
     panic("delrun no p->lock");
 #endif
-  acquire(&runq->lock);
-  // cprintf("%d: delrun %d\n", cpu->id, p->pid);
-  delrun1(runq, p);
-  release(&runq->lock);
+
+  if (p->on_runq < 0)
+    panic("delrun not on runq");
+  ns_remove(nsrunq, p->on_runq, p);
+  p->on_runq = -1;
 }

 //PAGEBREAK: 32
@@ -178,6 +156,7 @@ userinit(void)
   p->cwd = 0; // forkret will fix in the process's context
   acquire(&p->lock);
   addrun(p);
+  p->state = RUNNABLE;
   release(&p->lock);
 }
@@ -314,6 +293,7 @@ fork(int flags)
   acquire(&np->lock);
   addrun(np);
+  np->state = RUNNABLE;
   release(&np->lock);

   // cprintf("%d: fork done (pid %d)\n", proc->pid, pid);
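With addrun1() gone, addrun() no longer sets p->state; every call site now marks the process RUNNABLE itself while still holding p->lock, as the cv_wakeup, userinit, and fork hunks show. The resulting pattern, as a sketch (wakeup_example is a hypothetical helper assembled from those hunks; it assumes proc.h/defs.h for struct proc, addrun(), and the lock primitives):

static void
wakeup_example(struct proc *p)
{
  acquire(&p->lock);     // addrun() requires the caller to hold p->lock
  addrun(p);             // file p in nsrunq under key p->cpuid
  p->state = RUNNABLE;   // no longer done inside addrun()
  release(&p->lock);
}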
@@ -423,59 +403,69 @@ wait(void)
 }

 void
-migrate(void)
+migrate(struct proc *p)
 {
   int c;
-  struct proc *p;

   for (c = 0; c < NCPU; c++) {
     if (c == cpu->id)
       continue;
     if (idle[c]) {    // OK if there is a race
       // cprintf("migrate to %d\n", c);
-      p = proc;
       acquire(&p->lock);
+      if (p->state != RUNNABLE) {
+        release(&p->lock);
+        continue;
+      }
+
+      delrun(p);
       p->curcycles = 0;
       p->cpuid = c;
       addrun(p);
-      sched();
+
+      if (p == proc) {
+        proc->state = RUNNABLE;
+        sched();
+      }
       release(&proc->lock);
       return;
     }
   }
 }

-int
-steal(void)
+static void *
+steal_cb(int k, void *v)
 {
-  int c;
-  struct proc *p;
+  struct proc *p = v;
+
+  acquire(&p->lock);
+  if (p->state != RUNNABLE) {
+    release(&p->lock);
+    return 0;
+  }

-  for (c = 0; c < NCPU; c++) {
-    if (c == cpu->id)
-      continue;
-    acquire(&runqs[c].lock);
-    STAILQ_FOREACH(p, &runqs[c].runq, run_next) {
-      acquire(&p->lock);
-      if (p->state != RUNNABLE)
-        panic("non-runnable proc on runq");
-      if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
-        // cprintf("%d: steal %d (%d) from %d\n", cpu->id, p->pid, p->curcycles, c);
-        delrun1(&runqs[c], p);
-        release(&runqs[c].lock);
-        p->curcycles = 0;
-        p->cpuid = cpu->id;
-        addrun(p);
-        release(&p->lock);
-        return 1;
-      }
-      release(&p->lock);
-    }
-    release(&runqs[c].lock);
+  if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
+    // cprintf("%d: steal %d (%d) from %d\n", cpu->id, p->pid, p->curcycles, c);
+    delrun(p);
+    p->curcycles = 0;
+    p->cpuid = cpu->id;
+    addrun(p);
+    release(&p->lock);
+    return p;
   }
+  release(&p->lock);
   return 0;
 }

+int
+steal(void)
+{
+  void *stole = ns_enumerate(nsrunq, steal_cb);
+  return stole ? 1 : 0;
+}
+
 //PAGEBREAK: 42
 // Per-CPU process scheduler.
 // Each CPU calls scheduler() after setting itself up.
@@ -487,8 +477,6 @@ steal(void)
 void
 scheduler(void)
 {
-  struct proc *p;
-
   // allocate a fake PID for each scheduler thread
   struct proc *schedp = allocproc();
   if (!schedp)
@@ -504,47 +492,39 @@ scheduler(void)
     // Enable interrupts on this processor.
     sti();

-    // Loop over process table looking for process to run.
-    acquire(&runq->lock);
-    STAILQ_FOREACH(p, &runq->runq, run_next) {
-      acquire(&p->lock);
-      if(p->state != RUNNABLE)
-        panic("non-runnable process on runq\n");
-
-      STAILQ_REMOVE(&runq->runq, p, proc, run_next);
-      if (idle[cpu->id]) {
-        // cprintf("%d: no longer idle, running %d\n", cpu->id, p->pid);
-        idle[cpu->id] = 0;
-      }
-
-      release(&runq->lock);
-
-      // Switch to chosen process.  It is the process's job
-      // to release proc->lock and then reacquire it
-      // before jumping back to us.
-      proc = p;
-      switchuvm(p);
-      p->state = RUNNING;
-      p->tsc = rdtsc();
-
-      mtrace_fcall_register(schedp->pid, 0, 0, mtrace_pause);
-      mtrace_fcall_register(proc->pid, 0, 0, mtrace_resume);
-      mtrace_call_set(1, cpu->id);
-      swtch(&cpu->scheduler, proc->context);
-      mtrace_fcall_register(schedp->pid, 0, 0, mtrace_resume);
-      mtrace_call_set(0, cpu->id);
-      switchkvm();
-
-      // Process is done running for now.
-      // It should have changed its p->state before coming back.
-      proc = schedp;
-      release(&p->lock);
-      break;
-    }
-
-    if(p==0) {
-      release(&runq->lock);
+    struct proc *p = ns_lookup(nsrunq, cpu->id);
+    if (p) {
+      acquire(&p->lock);
+      if (p->state != RUNNABLE) {
+        release(&p->lock);
+      } else {
+        if (idle[cpu->id])
+          idle[cpu->id] = 0;
+
+        // Switch to chosen process.  It is the process's job
+        // to release proc->lock and then reacquire it
+        // before jumping back to us.
+        proc = p;
+        switchuvm(p);
+        p->state = RUNNING;
+        p->tsc = rdtsc();
+
+        mtrace_fcall_register(schedp->pid, 0, 0, mtrace_pause);
+        mtrace_fcall_register(proc->pid, 0, 0, mtrace_resume);
+        mtrace_call_set(1, cpu->id);
+        swtch(&cpu->scheduler, proc->context);
+        mtrace_fcall_register(schedp->pid, 0, 0, mtrace_resume);
+        mtrace_call_set(0, cpu->id);
+        switchkvm();
+
+        // Process is done running for now.
+        // It should have changed its p->state before coming back.
+        proc = schedp;
+        if (p->state != RUNNABLE)
+          delrun(p);
+        release(&p->lock);
+      }
+    } else {
       if (steal()) {
         if (idle[cpu->id])
           idle[cpu->id] = 0;
@@ -598,7 +578,7 @@ void
 yield(void)
 {
   acquire(&proc->lock);  //DOC: yieldlock
-  addrun(proc);
+  proc->state = RUNNABLE;
   sched();
   release(&proc->lock);
 }
@@ -651,7 +631,7 @@ kill(int pid)
   return 0;
 }

-void procdump(int k, void *v)
+void *procdump(int k, void *v)
 {
   struct proc *p = (struct proc *) v;
@@ -678,6 +658,7 @@ void procdump(int k, void *v)
       cprintf(" %p", pc[i]);
   }
   cprintf("\n");
+  return 0;
 }

 //PAGEBREAK: 36
@@ -93,6 +93,7 @@ struct proc {
   uint epoch;
   uint rcu_read_depth;
   char lockname[16];
+  int on_runq;
 };

 // Process memory is laid out contiguously, low addresses first:
@@ -116,13 +117,6 @@ struct cpu {
   struct cpu *cpu;
   struct proc *proc;           // The currently-running process.
   struct kmem *kmem;           // The per-core memory table
-  struct runq *runq;           // The per-core runq
-} __attribute__ ((aligned (CACHELINE)));
-
-struct runq {
-  char name[MAXNAME];
-  struct spinlock lock;
-  STAILQ_HEAD(runlist, proc) runq;
 } __attribute__ ((aligned (CACHELINE)));

 struct condtab {
@@ -132,7 +126,6 @@ struct condtab {
 } __attribute__ ((aligned (CACHELINE)));

 extern struct cpu cpus[NCPU];
-extern struct runq runqs[NCPU];
 extern struct condtab condtabs[NCPU];
 extern int ncpu;
 extern struct ns *nspid;
@@ -148,4 +141,3 @@ extern struct ns *nspid;
 extern struct cpu *cpu __asm("%gs:0");   // &cpus[cpunum()].cpu
 extern struct proc *proc __asm("%gs:4"); // cpus[cpunum()].proc
 extern struct kmem *kmem __asm("%gs:8"); // &cpu[cpunum()].kmem
-extern struct runq *runq __asm("%gs:12"); // &cpu[cpunum()].runq
@@ -33,12 +33,13 @@ rcu_alloc()
   return kmalloc(sizeof(struct rcu));
 }

-void
+void *
 rcu_min(int key, void *v){
   struct proc *p = (struct proc *) v;
   if (min_epoch > p->epoch) {
     min_epoch = p->epoch;
   }
+  return 0;
 }

 // XXX use atomic instruction to update list (instead of holding lock)
@@ -90,8 +90,8 @@ seginit(void)
   c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, DPL_USER);
   c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);

-  // Map cpu, curproc, kmem, runq
-  c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 16, 0);
+  // Map cpu, curproc, kmem
+  c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 12, 0);

   // lgt((void *) v2p((void*)(c->gdt)), sizeof(c->gdt));
   lgdt((void *)(c->gdt), sizeof(c->gdt));

@@ -101,7 +101,6 @@ seginit(void)
   cpu = c;
   proc = 0;
   kmem = &kmems[cpunum()];
-  runq = &runqs[cpunum()];
 }

 void
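One inference from the seginit() and defs.h hunks (not stated in the commit itself): each per-CPU pointer windowed through the SEG_KCPU segment is 4 bytes on 32-bit x86, so dropping the runq pointer shrinks the segment from four pointers (limit 16) to three (limit 12) and retires the %gs:12 slot:

// Per-CPU pointers reachable through %gs after this commit:
//   %gs:0  -> cpu    (struct cpu *)    // &cpus[cpunum()].cpu
//   %gs:4  -> proc   (struct proc *)   // cpus[cpunum()].proc
//   %gs:8  -> kmem   (struct kmem *)   // &cpus[cpunum()].kmem
//   %gs:12 -> runq                     // removed; hence SEG(STA_W, &c->cpu, 12, 0)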