checkpoint new sched code

Parent 472e7a01
@@ -20,6 +20,8 @@ struct cpu {
   int timer_printpc;
   atomic<u64> tlbflush_done;   // last tlb flush req done on this cpu
+  struct proc *prev;
+
   // Cpu-local storage variables; see below
   struct cpu *cpu;
   struct proc *proc;           // The currently-running process.
......
@@ -184,13 +184,17 @@ int kill(int);
 void pinit(void);
 void procdumpall(void);
 void scheduler(void) __noret__;
-void sched(void);
 void userinit(void);
 int wait(void);
 void yield(void);
 struct proc* threadalloc(void (*fn)(void*), void *arg);
 void threadpin(void (*fn)(void*), void *arg, const char *name, int cpu);
+
+// XXX
+void sched(void);
+void post_swtch(void);
+void scheddump(void);
 
 // prof.c
 extern int profenable;
 void profreset(void);
......
@@ -94,7 +94,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
   release(&cv->lock);
   sched();
-  sti();
+  //sti();
   // Reacquire original lock.
   acquire(lk);
......
@@ -245,6 +245,7 @@ consoleintr(int (*getc)(void))
     switch(c){
     case C('P'):  // Process listing.
       procdumpall();
+      scheddump();
       break;
     case C('E'):  // Print user-space PCs.
       for (u32 i = 0; i < NCPU; i++)
......
@@ -124,9 +124,11 @@ switchuvm(struct proc *p)
   mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
   mycpu()->ts.iomba = (u16)offsetof(struct taskstate, iopb);
   ltr(TSSSEG);
-  if(p->vmap == 0 || p->vmap->pml4 == 0)
-    panic("switchuvm: no vmap/pml4");
-  lcr3(v2p(p->vmap->pml4));  // switch to new address space
+  if (p->vmap != 0 && p->vmap->pml4 != 0)
+    lcr3(v2p(p->vmap->pml4));  // switch to new address space
+  else
+    switchkvm();
   popcli();
 }
......
@@ -14,76 +14,105 @@
 #include "vm.hh"
 #include "ns.hh"
 
-static int __mpalign__ idle[NCPU];
 static struct proc *the_idle[NCPU] __mpalign__;
 
+extern void
+forkret(void);
+
 void
-idleloop(void)
+post_swtch(void)
+{
+  if (get_proc_state(mycpu()->prev) == RUNNABLE &&
+      mycpu()->prev != the_idle[mycpu()->id])
+    addrun(mycpu()->prev);
+  release(&mycpu()->prev->lock);
+  popcli();
+}
+
+void
+sched(void)
 {
-  extern void forkret(void);
-  struct proc *idlep = the_idle[cpunum()];
+  int intena;
+
+#if SPINLOCK_DEBUG
+  if(!holding(&myproc()->lock))
+    panic("sched proc->lock");
+#endif
+  if(mycpu()->ncli != 1)
+    panic("sched locks");
+  if(get_proc_state(myproc()) == RUNNING)
+    panic("sched running");
+  if(readrflags()&FL_IF)
+    panic("sched interruptible");
+  intena = mycpu()->intena;
+  myproc()->curcycles += rdtsc() - myproc()->tsc;
+  if (get_proc_state(myproc()) == ZOMBIE)
+    mtstop(myproc());
+  else
+    mtpause(myproc());
+  mtign();
+
+  struct proc *next = schednext();
+  if (next) {
+switchit:
+    pushcli();
+    if (get_proc_state(next) != RUNNABLE)
+      panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
+
+    // Switch to chosen process.  It is the process's job
+    // to release proc->lock and then reacquire it
+    // before jumping back to us.
+    struct proc *prev = myproc();
+    mycpu()->proc = next;
+    mycpu()->prev = prev;
+    switchuvm(next);
+    set_proc_state(next, RUNNING);
+    next->tsc = rdtsc();
+    mtpause(next);
+    if (next->context->rip != (uptr)forkret &&
+        next->context->rip != (uptr)threadstub)
+    {
+      mtresume(next);
+    }
+    mtrec();
+    swtch(&prev->context, next->context);
+    mycpu()->intena = intena;
+    post_swtch();
+  } else if (get_proc_state(myproc()) != RUNNABLE) {
+    next = the_idle[mycpu()->id];
+    goto switchit;
+  } else {
+    set_proc_state(myproc(), RUNNING);
+    mycpu()->intena = intena;
+    release(&myproc()->lock);
+  }
+
+  //swtch(&myproc()->context, mycpu()->scheduler);
+  //mycpu()->intena = intena;
+}
+
+void
+idleloop(void)
+{
   // Test the work queue
   //extern void testwq(void);
   //testwq();
 
   // Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
   // mtrace_call_set(1, cpu->id);
-  mtstart(scheduler, idlep);
+  //mtstart(scheduler, idlep);
 
-  for(;;){
-    // Enable interrupts on this processor.
-    sti();
-
-    struct proc *p = schednext();
-    if (p) {
-      cli();
-      //acquire(&p->lock);
-      if (get_proc_state(p) != RUNNABLE) {
-        panic("Huh?");
-      } else {
-        if (idle[mycpu()->id])
-          idle[mycpu()->id] = 0;
-
-        // Switch to chosen process.  It is the process's job
-        // to release proc->lock and then reacquire it
-        // before jumping back to us.
-        mycpu()->proc = p;
-        switchuvm(p);
-        set_proc_state(p, RUNNING);
-        p->tsc = rdtsc();
-        mtpause(idlep);
-        if (p->context->rip != (uptr)forkret &&
-            p->context->rip != (uptr)threadstub)
-        {
-          mtresume(p);
-        }
-        mtrec();
-        swtch(&mycpu()->scheduler, myproc()->context);
-        mtresume(idlep);
-        mtign();
-        switchkvm();
-
-        // Process is done running for now.
-        // It should have changed its p->state before coming back.
-        mycpu()->proc = idlep;
-        if (get_proc_state(p) == RUNNABLE)
-          addrun(p);
-        release(&p->lock);
-      }
-    } else {
-      if (steal()) {
-        if (idle[mycpu()->id])
-          idle[mycpu()->id] = 0;
-      } else {
-        if (!idle[mycpu()->id])
-          idle[mycpu()->id] = 1;
-      }
-    }
-
-    if (idle[mycpu()->id]) {
+  sti();
+  for (;;) {
+    acquire(&myproc()->lock);
+    set_proc_state(myproc(), RUNNABLE);
+    sched();
+
+    if (steal() == 0) {
       int worked;
       do {
         assert(mycpu()->ncli == 0);
@@ -106,5 +135,4 @@ initidle(void)
   mycpu()->proc = p;
   myproc()->cpu_pin = 1;
   the_idle[cpunum()] = p;
-  idle[cpunum()] = 1;
 }
@@ -30,33 +30,6 @@ struct kstack_tag kstack_tag[NCPU];
 
 enum { sched_debug = 0 };
 
-void
-sched(void)
-{
-  int intena;
-
-#if SPINLOCK_DEBUG
-  if(!holding(&myproc()->lock))
-    panic("sched proc->lock");
-#endif
-  if(mycpu()->ncli != 1)
-    panic("sched locks");
-  if(get_proc_state(myproc()) == RUNNING)
-    panic("sched running");
-  if(readrflags()&FL_IF)
-    panic("sched interruptible");
-  intena = mycpu()->intena;
-  myproc()->curcycles += rdtsc() - myproc()->tsc;
-  if (get_proc_state(myproc()) == ZOMBIE)
-    mtstop(myproc());
-  else
-    mtpause(myproc());
-  mtign();
-  swtch(&myproc()->context, mycpu()->scheduler);
-  mycpu()->intena = intena;
-}
-
 // Give up the CPU for one scheduling round.
 void
 yield(void)
@@ -64,7 +37,7 @@ yield(void)
   acquire(&myproc()->lock);  //DOC: yieldlock
   set_proc_state(myproc(), RUNNABLE);
   sched();
-  sti();
+  //sti();
   //release(&myproc()->lock);
 }
@@ -74,8 +47,8 @@ yield(void)
 void
 forkret(void)
 {
-  sti();
-  //release(&myproc()->lock);
+  post_swtch();
 
   // Just for the first process. can't do it earlier
   // b/c file system code needs a process context
   // in which to call cv_sleep().
@@ -351,7 +324,6 @@ void
 procdumpall(void)
 {
   static const char *states[] = {
-  /* [UNUSED] = */ "unused",
   /* [EMBRYO] = */ "embryo",
   /* [SLEEPING] = */ "sleep ",
   /* [RUNNABLE] = */ "runble",
@@ -500,8 +472,7 @@ wait(void)
 void
 threadhelper(void (*fn)(void *), void *arg)
 {
-  sti();
-  //release(&myproc()->lock);
+  post_swtch();
   mtstart(fn, myproc());
   fn(arg);
   exit();
......
@@ -128,11 +128,29 @@ initsched(void)
   int i;
 
   for (i = 0; i < NCPU; i++) {
     initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
     STAILQ_INIT(&runq[i].q);
   }
 }
 
+void
+scheddump(void)
+{
+  struct proc *p;
+  int i;
+
+  for (i = 0; i < NCPU; i++) {
+    struct runq *q = &runq[i];
+    cprintf("%u\n", i);
+    acquire(&q->lock);
+    STAILQ_FOREACH(p, &q->q, runqlink) {
+      cprintf(" %s\n", p->name);
+    }
+    release(&q->lock);
+  }
+}
+
 #if 0
 static int
 migrate(struct proc *p)
......
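Taken together, the hunks above replace the old model, in which every CPU bounced through a dedicated scheduler context (swtch to mycpu()->scheduler and back), with direct process-to-process switching: sched() now picks the next process itself via schednext(), falls back to the per-CPU idle process when the caller is no longer runnable, switches with a single swtch(&prev->context, next->context), and the code that resumes on the other side (post_swtch(), also called from forkret() and threadhelper()) requeues and unlocks whatever ran before it. The idle loop becomes an ordinary process that marks itself RUNNABLE, calls sched(), and tries to steal work when nothing local is runnable. The following user-space sketch models only that hand-off protocol; struct task, runq_push(), and the printf standing in for a real swtch() are illustrative inventions for this example, not the kernel's actual types or API.

/* Simplified, user-space model of the sched()/post_swtch() hand-off.
 * Illustrative sketch only: task, runq_*, current, prev_task are
 * stand-ins invented for this example, not the kernel's real code. */
#include <stdio.h>
#include <stddef.h>

enum state { RUNNABLE, RUNNING, SLEEPING };

struct task {
  const char *name;
  enum state state;
  struct task *next;              /* run-queue link */
};

static struct task *runq_head;    /* one CPU's run queue */
static struct task *current;      /* task now on the CPU */
static struct task *prev_task;    /* what cpu->prev records in the kernel */
static struct task idle_task = { "idle", RUNNABLE, NULL };

static void runq_push(struct task *t) {
  t->next = runq_head;
  runq_head = t;
}

static struct task *runq_pop(void) {
  struct task *t = runq_head;
  if (t)
    runq_head = t->next;
  return t;
}

/* Runs on the *next* task's side of the switch, mirroring post_swtch():
 * requeue the previous task if it is still runnable and not the idle task. */
static void post_swtch(void) {
  if (prev_task->state == RUNNABLE && prev_task != &idle_task)
    runq_push(prev_task);
}

/* Mirrors the new sched(): pick the next task directly, falling back to
 * the idle task when the caller is blocking; no separate scheduler context. */
static void sched(void) {
  struct task *next = runq_pop();
  if (!next) {
    if (current->state == RUNNABLE) {
      current->state = RUNNING;   /* nothing better to run: keep running */
      return;
    }
    next = &idle_task;            /* caller is blocking: run the idle task */
  }
  prev_task = current;
  next->state = RUNNING;
  printf("switch %s -> %s\n", prev_task->name, next->name);
  current = next;                 /* stand-in for swtch(&prev->context, next->context) */
  post_swtch();                   /* first thing the resumed side does */
}

int main(void) {
  struct task a = { "a", RUNNABLE, NULL }, b = { "b", RUNNABLE, NULL };
  current = &idle_task;
  idle_task.state = RUNNING;
  runq_push(&a);
  runq_push(&b);

  sched();                        /* idle -> b */
  current->state = RUNNABLE;
  sched();                        /* b -> a; b is requeued by post_swtch() */
  current->state = SLEEPING;
  sched();                        /* a -> b; a is not requeued */
  return 0;
}

Compiled and run, the sketch prints the three hand-offs (idle -> b, b -> a, a -> b), showing how the previously running task is requeued by the side that resumes, not by a separate scheduler loop.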