Commit 822d634b authored by Nickolai Zeldovich

use namespaces for scheduler

Parent 6b7ca768
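In outline: the per-CPU STAILQ run queues (struct runq, one spinlock each) are replaced by a single RCU-based namespace, nsrunq, keyed by CPU id, and ns_enumerate gains an early-exit return value so steal() can be written as a callback. A minimal sketch of the new pattern, using only the ns API declared in this diff (addrun_sketch is a hypothetical name, not part of the commit):

    // Sketch: the run queue becomes a hash namespace keyed by cpuid.
    struct ns *nsrunq;                  // shared, RCU-protected

    static void
    addrun_sketch(struct proc *p)       // hypothetical; mirrors the new addrun
    {
      ns_insert(nsrunq, p->cpuid, p);   // file p under its home CPU
      p->on_runq = p->cpuid;            // remember the key for delrun
    }

    // A scheduler then picks work with:
    //   struct proc *p = ns_lookup(nsrunq, cpu->id);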
@@ -62,6 +62,7 @@ cv_wakeup(struct condvar *cv)
     p->cv_next = 0;
     p->oncv = 0;
     addrun(p);
+    p->state = RUNNABLE;
     release(&p->lock);
     cv->waiters = nxt;
   }
@@ -105,8 +105,8 @@
 int ns_allockey(struct ns*);
 int ns_insert(struct ns*, int key, void*);
 void* ns_lookup(struct ns*, int);
 int ns_remove(struct ns *ns, int key, void *val);
-void ns_enumerate(struct ns *ns, void (*f)(int, void *));
-void ns_enumerate_key(struct ns *ns, int key, void (*f)(void *));
+void* ns_enumerate(struct ns *ns, void *(*f)(int, void *));
+void* ns_enumerate_key(struct ns *ns, int key, void *(*f)(void *));
 
 // picirq.c
@@ -134,7 +134,7 @@
 void sched(void);
 void userinit(void);
 int wait(void);
 void yield(void);
-void migrate(void);
+void migrate(struct proc *);
 
 // rcu.c
 void rcuinit(void);
@@ -131,7 +131,7 @@ exec(char *path, char **argv)
   switchuvm(proc);
   vmap_decref(oldvmap);
 
-  migrate();
+  migrate(proc);
 
   return 0;
@@ -101,7 +101,7 @@ ns_lookup(struct ns *ns, int key)
   while (e != NULL) {
     if (e->key == key) {
-      return e;
+      return e->val;
     }
     e = e->next;
   }
@@ -138,31 +138,38 @@ ns_remove(struct ns *ns, int key, void *v)
   return -1;
 }
 
-void
-ns_enumerate(struct ns *ns, void (*f)(int, void *))
+void *
+ns_enumerate(struct ns *ns, void *(*f)(int, void *))
 {
   rcu_begin_read();
   for (int i = 0; i < NHASH; i++) {
     struct elem *e = ns->table[i].chain;
     while (e != NULL) {
-      (*f)(e->key, e->val);
+      void *r = (*f)(e->key, e->val);
+      if (r)
+        return r;
       e = e->next;
     }
   }
   rcu_end_read();
+  return 0;
 }
 
-void
-ns_enumerate_key(struct ns *ns, int key, void (*f)(void *))
+void *
+ns_enumerate_key(struct ns *ns, int key, void *(*f)(void *))
 {
   uint i = key % NHASH;
   rcu_begin_read();
   struct elem *e = ns->table[i].chain;
   while (e) {
-    if (e->key == key)
-      (*f)(e->val);
+    if (e->key == key) {
+      void *r = (*f)(e->val);
+      if (r)
+        return r;
+    }
     e = e->next;
   }
   rcu_end_read();
+  return 0;
 }
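With this change the enumerate functions thread a value through the callback: the first non-NULL return stops the walk and becomes the result of ns_enumerate / ns_enumerate_key. Note that, as shown, the early-return paths skip rcu_end_read(), leaving the read-side section open; steal_cb below relies on exactly this stop-on-first behavior. A hypothetical caller (match_pid and the constant are not part of this commit):

    // Hypothetical: find the proc with pid 42 by scanning nspid.
    static void *
    match_pid(int key, void *v)
    {
      struct proc *p = v;
      return (key == 42) ? p : 0;   // non-NULL result ends the enumeration
    }

    // struct proc *p = ns_enumerate(nspid, match_pid);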
@@ -10,9 +10,9 @@
 #include "proc.h"
 #include "xv6-mtrace.h"
 
-struct runq runqs[NCPU];
 int __attribute__ ((aligned (CACHELINE))) idle[NCPU];
 struct ns *nspid __attribute__ ((aligned (CACHELINE)));
+struct ns *nsrunq __attribute__ ((aligned (CACHELINE)));
 static struct proc *initproc __attribute__ ((aligned (CACHELINE)));
 
 extern void forkret(void);
@@ -27,13 +27,12 @@ pinit(void)
   if (nspid == 0)
     panic("pinit");
 
-  for (c = 0; c < NCPU; c++) {
+  nsrunq = nsalloc();
+  if (nsrunq == 0)
+    panic("pinit runq");
+
+  for (c = 0; c < NCPU; c++)
     idle[c] = 1;
-    runqs[c].name[0] = (char) (c + '0');
-    safestrcpy(runqs[c].name+1, "runq", MAXNAME-1);
-    initlock(&runqs[c].lock, runqs[c].name);
-    STAILQ_INIT(&runqs[c].runq);
-  }
 }
 
 //PAGEBREAK: 32
@@ -56,6 +55,7 @@ allocproc(void)
   p->pid = ns_allockey(nspid);
   p->epoch = INF;
   p->cpuid = cpu->id;
+  p->on_runq = -1;
   snprintf(p->lockname, sizeof(p->lockname), "proc:%d", p->pid);
   initlock(&p->lock, p->lockname);
@@ -89,17 +89,6 @@ allocproc(void)
   return p;
 }
 
-static void
-addrun1(struct runq *rq, struct proc *p)
-{
-  struct proc *q;
-
-  STAILQ_FOREACH(q, &rq->runq, run_next)
-    if (q == p)
-      panic("addrun1: already on queue");
-  p->state = RUNNABLE;
-  STAILQ_INSERT_TAIL(&rq->runq, p, run_next);
-}
-
 // Mark a process RUNNABLE and add it to the runq
 // of its cpu. Caller must hold p->lock so that
 // some other core doesn't start running the
@@ -114,23 +103,11 @@ addrun(struct proc *p)
   if(!holding(&p->lock))
     panic("addrun no p->lock");
 #endif
-  acquire(&runqs[p->cpuid].lock);
   // cprintf("%d: addrun %d\n", cpu->id, p->pid);
-  addrun1(&runqs[p->cpuid], p);
-  release(&runqs[p->cpuid].lock);
-}
-
-static void
-delrun1(struct runq *rq, struct proc *p)
-{
-  struct proc *q, *nq;
-
-  STAILQ_FOREACH_SAFE(q, &rq->runq, run_next, nq) {
-    if (q == p) {
-      STAILQ_REMOVE(&rq->runq, q, proc, run_next);
-      return;
-    }
-  }
-  panic("delrun1: not on runq");
+  if (p->on_runq >= 0)
+    panic("addrun on runq already");
+  ns_insert(nsrunq, p->cpuid, p);
+  p->on_runq = p->cpuid;
 }
 
 void
@@ -140,10 +117,11 @@ delrun(struct proc *p)
   if(!holding(&p->lock))
     panic("delrun no p->lock");
 #endif
-  acquire(&runq->lock);
   // cprintf("%d: delrun %d\n", cpu->id, p->pid);
-  delrun1(runq, p);
-  release(&runq->lock);
+  if (p->on_runq < 0)
+    panic("delrun not on runq");
+  ns_remove(nsrunq, p->on_runq, p);
+  p->on_runq = -1;
 }
 
 //PAGEBREAK: 32
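addrun and delrun now bracket a process's presence in nsrunq through the new on_runq field, which records the key the process was inserted under; delrun removes by on_runq rather than cpuid, so a move between CPUs stays consistent even though cpuid changes in between. This is the sequence migrate() and steal_cb() perform below:

    // Sketch of the move protocol (caller holds p->lock; target is the
    // destination CPU id):
    delrun(p);            // ns_remove(nsrunq, p->on_runq, p); on_runq = -1
    p->curcycles = 0;
    p->cpuid = target;    // new home CPU
    addrun(p);            // ns_insert(nsrunq, p->cpuid, p); on_runq = cpuid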
@@ -178,6 +156,7 @@ userinit(void)
   p->cwd = 0; // forkret will fix in the process's context
   acquire(&p->lock);
   addrun(p);
+  p->state = RUNNABLE;
   release(&p->lock);
 }
@@ -314,6 +293,7 @@ fork(int flags)
   acquire(&np->lock);
   addrun(np);
+  np->state = RUNNABLE;
   release(&np->lock);
 
   // cprintf("%d: fork done (pid %d)\n", proc->pid, pid);
@@ -423,59 +403,69 @@ wait(void)
 }
 
 void
-migrate(void)
+migrate(struct proc *p)
 {
   int c;
-  struct proc *p;
 
   for (c = 0; c < NCPU; c++) {
     if (c == cpu->id)
       continue;
     if (idle[c]) { // OK if there is a race
       // cprintf("migrate to %d\n", c);
-      p = proc;
       acquire(&p->lock);
+      if (p->state != RUNNABLE) {
+        release(&p->lock);
+        continue;
+      }
       delrun(p);
       p->curcycles = 0;
       p->cpuid = c;
       addrun(p);
+      if (p == proc) {
        proc->state = RUNNABLE;
        sched();
+      }
       release(&proc->lock);
       return;
     }
   }
 }
 
-int
-steal(void)
+static void *
+steal_cb(int k, void *v)
 {
-  int c;
-  struct proc *p;
+  struct proc *p = v;
 
-  for (c = 0; c < NCPU; c++) {
-    if (c == cpu->id)
-      continue;
-    acquire(&runqs[c].lock);
-    STAILQ_FOREACH(p, &runqs[c].runq, run_next) {
   acquire(&p->lock);
-      if (p->state != RUNNABLE)
-        panic("non-runnable proc on runq");
+  if (p->state != RUNNABLE) {
+    release(&p->lock);
+    return 0;
+  }
   if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
     // cprintf("%d: steal %d (%d) from %d\n", cpu->id, p->pid, p->curcycles, c);
-    delrun1(&runqs[c], p);
-    release(&runqs[c].lock);
+    delrun(p);
     p->curcycles = 0;
     p->cpuid = cpu->id;
    addrun(p);
     release(&p->lock);
-    return 1;
+    return p;
   }
   release(&p->lock);
-    }
-    release(&runqs[c].lock);
-  }
   return 0;
 }
+
+int
+steal(void)
+{
+  void *stole = ns_enumerate(nsrunq, steal_cb);
+  return stole ? 1 : 0;
+}
 
 //PAGEBREAK: 42
 // Per-CPU process scheduler.
 // Each CPU calls scheduler() after setting itself up.
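steal() is now a two-liner over ns_enumerate: steal_cb returns 0 to keep scanning and returns the stolen proc to stop. Two behavioral notes that follow from the diff: the RUNNABLE check changed from a panic to a skip, since a proc now stays in nsrunq while it runs; and the callback as written has no k == cpu->id check, so nothing stops a core from taking an entry filed under its own id.

    // Callback contract assumed by ns_enumerate (see ns.c above):
    //   return 0        -> continue with the next element
    //   return non-zero -> stop; ns_enumerate returns this pointer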
@@ -487,8 +477,6 @@ steal(void)
 void
 scheduler(void)
 {
-  struct proc *p;
-
   // allocate a fake PID for each scheduler thread
   struct proc *schedp = allocproc();
   if (!schedp)
@@ -504,21 +492,14 @@ scheduler(void)
     // Enable interrupts on this processor.
     sti();
 
     // Loop over process table looking for process to run.
-    acquire(&runq->lock);
-    STAILQ_FOREACH(p, &runq->runq, run_next) {
+    struct proc *p = ns_lookup(nsrunq, cpu->id);
+    if (p) {
       acquire(&p->lock);
-      if(p->state != RUNNABLE)
-        panic("non-runnable process on runq\n");
-      STAILQ_REMOVE(&runq->runq, p, proc, run_next);
-      if (idle[cpu->id]) {
-        // cprintf("%d: no longer idle, running %d\n", cpu->id, p->pid);
+      if (p->state != RUNNABLE) {
+        release(&p->lock);
+      } else {
+        if (idle[cpu->id])
          idle[cpu->id] = 0;
-      }
-      release(&runq->lock);
 
       // Switch to chosen process. It is the process's job
       // to release proc->lock and then reacquire it
@@ -539,12 +520,11 @@
       // Process is done running for now.
       // It should have changed its p->state before coming back.
       proc = schedp;
+      if (p->state != RUNNABLE)
+        delrun(p);
       release(&p->lock);
-      break;
     }
-    if(p==0) {
-      release(&runq->lock);
+    } else {
       if (steal()) {
         if (idle[cpu->id])
           idle[cpu->id] = 0;
@@ -598,7 +578,7 @@ void
 yield(void)
 {
   acquire(&proc->lock);  //DOC: yieldlock
-  addrun(proc);
+  proc->state = RUNNABLE;
   sched();
   release(&proc->lock);
 }
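yield() no longer calls addrun: under the ns runq a process is inserted once (fork, userinit, cv_wakeup) and stays there while it runs; the scheduler delruns it only when it comes back in a non-RUNNABLE state. Setting the state is now the caller's job everywhere, since the old addrun1 (which set RUNNABLE) is gone:

    // Lifecycle as reflected in this diff:
    //   addrun(p); p->state = RUNNABLE;   // enqueue once (fork/userinit/cv_wakeup)
    //   yield():   p->state = RUNNABLE;   // still on nsrunq; no re-insert
    //   scheduler: if (p->state != RUNNABLE) delrun(p);  // dequeue on sleep/exit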
@@ -651,7 +631,7 @@ kill(int pid)
   return 0;
 }
 
-void procdump(int k, void *v)
+void *procdump(int k, void *v)
 {
   struct proc *p = (struct proc *) v;
@@ -678,6 +658,7 @@ void procdump(int k, void *v)
     cprintf(" %p", pc[i]);
   }
   cprintf("\n");
+  return 0;
 }
 
 //PAGEBREAK: 36
@@ -93,6 +93,7 @@ struct proc {
   uint epoch;
   uint rcu_read_depth;
   char lockname[16];
+  int on_runq;
 };
 
 // Process memory is laid out contiguously, low addresses first:
@@ -116,13 +117,6 @@ struct cpu {
   struct cpu *cpu;
   struct proc *proc;           // The currently-running process.
   struct kmem *kmem;           // The per-core memory table
-  struct runq *runq;           // The per-core runq
 } __attribute__ ((aligned (CACHELINE)));
 
-struct runq {
-  char name[MAXNAME];
-  struct spinlock lock;
-  STAILQ_HEAD(runlist, proc) runq;
-} __attribute__ ((aligned (CACHELINE)));
-
 struct condtab {
@@ -132,7 +126,6 @@ struct condtab {
 } __attribute__ ((aligned (CACHELINE)));
 
 extern struct cpu cpus[NCPU];
-extern struct runq runqs[NCPU];
 extern struct condtab condtabs[NCPU];
 extern int ncpu;
 extern struct ns *nspid;
@@ -148,4 +141,3 @@ extern struct ns *nspid;
 extern struct cpu *cpu __asm("%gs:0");    // &cpus[cpunum()].cpu
 extern struct proc *proc __asm("%gs:4");  // cpus[cpunum()].proc
 extern struct kmem *kmem __asm("%gs:8");  // &cpu[cpunum()].kmem
-extern struct runq *runq __asm("%gs:12"); // &cpu[cpunum()].runq
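With runq gone, the %gs-mapped per-CPU block shrinks from four 4-byte pointers to three, which is why seginit below drops the SEG_KCPU segment limit from 16 to 12:

    // Per-CPU block addressed through %gs after this commit:
    //   %gs:0   struct cpu  *cpu    // &cpus[cpunum()].cpu
    //   %gs:4   struct proc *proc   // cpus[cpunum()].proc
    //   %gs:8   struct kmem *kmem   // &cpus[cpunum()].kmem
    //   (no %gs:12 entry any more)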
@@ -33,12 +33,13 @@ rcu_alloc()
   return kmalloc(sizeof(struct rcu));
 }
 
-void
+void *
 rcu_min(int key, void *v){
   struct proc *p = (struct proc *) v;
   if (min_epoch > p->epoch) {
     min_epoch = p->epoch;
   }
+  return 0;
 }
 
 // XXX use atomic instruction to update list (instead of holding lock)
@@ -90,8 +90,8 @@ seginit(void)
   c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, DPL_USER);
   c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);
 
-  // Map cpu, curproc, kmem, runq
-  c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 16, 0);
+  // Map cpu, curproc, kmem
+  c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 12, 0);
 
   // lgt((void *) v2p((void*)(c->gdt)), sizeof(c->gdt));
   lgdt((void *)(c->gdt), sizeof(c->gdt));
@@ -101,7 +101,6 @@ seginit(void)
   cpu = c;
   proc = 0;
   kmem = &kmems[cpunum()];
-  runq = &runqs[cpunum()];
 }
 
 void