checkpoint new sched code

上级 472e7a01
......@@ -20,6 +20,8 @@ struct cpu {
int timer_printpc;
atomic<u64> tlbflush_done; // last tlb flush req done on this cpu
struct proc *prev;
// Cpu-local storage variables; see below
struct cpu *cpu;
struct proc *proc; // The currently-running process.
......
......@@ -184,13 +184,17 @@ int kill(int);
void pinit(void);
void procdumpall(void);
void scheduler(void) __noret__;
void sched(void);
void userinit(void);
int wait(void);
void yield(void);
struct proc* threadalloc(void (*fn)(void*), void *arg);
void threadpin(void (*fn)(void*), void *arg, const char *name, int cpu);
// XXX
void sched(void);
void post_swtch(void);
void scheddump(void);
// prof.c
extern int profenable;
void profreset(void);
......
......@@ -94,7 +94,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
release(&cv->lock);
sched();
sti();
//sti();
// Reacquire original lock.
acquire(lk);
......
......@@ -245,6 +245,7 @@ consoleintr(int (*getc)(void))
switch(c){
case C('P'): // Process listing.
procdumpall();
scheddump();
break;
case C('E'): // Print user-space PCs.
for (u32 i = 0; i < NCPU; i++)
......
......@@ -124,9 +124,11 @@ switchuvm(struct proc *p)
mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
mycpu()->ts.iomba = (u16)offsetof(struct taskstate, iopb);
ltr(TSSSEG);
if(p->vmap == 0 || p->vmap->pml4 == 0)
panic("switchuvm: no vmap/pml4");
lcr3(v2p(p->vmap->pml4)); // switch to new address space
if (p->vmap != 0 && p->vmap->pml4 != 0)
lcr3(v2p(p->vmap->pml4)); // switch to new address space
else
switchkvm();
popcli();
}
......
......@@ -14,76 +14,105 @@
#include "vm.hh"
#include "ns.hh"
static int __mpalign__ idle[NCPU];
static struct proc *the_idle[NCPU] __mpalign__;
extern void
forkret(void);
// Finish a context switch on the CPU we just arrived on: if the
// process we switched away from (mycpu()->prev) is still RUNNABLE
// and is not this CPU's idle thread, put it back on a run queue,
// then release its lock (held across swtch() by sched()) and pop
// the cli level pushed before the switch.
// NOTE(review): this is a scraped diff — the removed `idleloop(void)`
// signature line below is interleaved with the added `post_swtch(void)`.
void
idleloop(void)
post_swtch(void)
{
// Requeue prev unless it is the idle proc, which never sits on a runq.
if (get_proc_state(mycpu()->prev) == RUNNABLE &&
mycpu()->prev != the_idle[mycpu()->id])
addrun(mycpu()->prev);
// prev's lock was handed across swtch(); drop it now.
release(&mycpu()->prev->lock);
popcli();  // matches the pushcli() done in sched() before swtch()
}
void
sched(void)
{
extern void forkret(void);
struct proc *idlep = the_idle[cpunum()];
int intena;
#if SPINLOCK_DEBUG
if(!holding(&myproc()->lock))
panic("sched proc->lock");
#endif
if(mycpu()->ncli != 1)
panic("sched locks");
if(get_proc_state(myproc()) == RUNNING)
panic("sched running");
if(readrflags()&FL_IF)
panic("sched interruptible");
intena = mycpu()->intena;
myproc()->curcycles += rdtsc() - myproc()->tsc;
if (get_proc_state(myproc()) == ZOMBIE)
mtstop(myproc());
else
mtpause(myproc());
mtign();
struct proc *next = schednext();
if (next) {
switchit:
pushcli();
if (get_proc_state(next) != RUNNABLE)
panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
// Switch to chosen process. It is the process's job
// to release proc->lock and then reacquire it
// before jumping back to us.
struct proc *prev = myproc();
mycpu()->proc = next;
mycpu()->prev = prev;
switchuvm(next);
set_proc_state(next, RUNNING);
next->tsc = rdtsc();
mtpause(next);
if (next->context->rip != (uptr)forkret &&
next->context->rip != (uptr)threadstub)
{
mtresume(next);
}
mtrec();
swtch(&prev->context, next->context);
mycpu()->intena = intena;
post_swtch();
} else if (get_proc_state(myproc()) != RUNNABLE) {
next = the_idle[mycpu()->id];
goto switchit;
} else {
set_proc_state(myproc(), RUNNING);
mycpu()->intena = intena;
release(&myproc()->lock);
}
//swtch(&myproc()->context, mycpu()->scheduler);
//mycpu()->intena = intena;
}
void
idleloop(void)
{
// Test the work queue
//extern void testwq(void);
//testwq();
// Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
// mtrace_call_set(1, cpu->id);
mtstart(scheduler, idlep);
for(;;){
// Enable interrupts on this processor.
sti();
struct proc *p = schednext();
if (p) {
cli();
//acquire(&p->lock);
if (get_proc_state(p) != RUNNABLE) {
panic("Huh?");
} else {
if (idle[mycpu()->id])
idle[mycpu()->id] = 0;
// Switch to chosen process. It is the process's job
// to release proc->lock and then reacquire it
// before jumping back to us.
mycpu()->proc = p;
switchuvm(p);
set_proc_state(p, RUNNING);
p->tsc = rdtsc();
mtpause(idlep);
if (p->context->rip != (uptr)forkret &&
p->context->rip != (uptr)threadstub)
{
mtresume(p);
}
mtrec();
swtch(&mycpu()->scheduler, myproc()->context);
mtresume(idlep);
mtign();
switchkvm();
// Process is done running for now.
// It should have changed its p->state before coming back.
mycpu()->proc = idlep;
if (get_proc_state(p) == RUNNABLE)
addrun(p);
release(&p->lock);
}
} else {
if (steal()) {
if (idle[mycpu()->id])
idle[mycpu()->id] = 0;
} else {
if (!idle[mycpu()->id])
idle[mycpu()->id] = 1;
}
}
//mtstart(scheduler, idlep);
sti();
for (;;) {
acquire(&myproc()->lock);
set_proc_state(myproc(), RUNNABLE);
sched();
if (idle[mycpu()->id]) {
if (steal() == 0) {
int worked;
do {
assert(mycpu()->ncli == 0);
......@@ -106,5 +135,4 @@ initidle(void)
mycpu()->proc = p;
myproc()->cpu_pin = 1;
the_idle[cpunum()] = p;
idle[cpunum()] = 1;
}
......@@ -30,33 +30,6 @@ struct kstack_tag kstack_tag[NCPU];
enum { sched_debug = 0 };
// (Old scheduler-thread version, deleted by this commit.)
// Give up the CPU by switching to this CPU's scheduler context.
// Caller holds myproc()->lock with exactly one cli level pushed and
// has already changed myproc()'s state away from RUNNING.
void
sched(void)
{
int intena;
#if SPINLOCK_DEBUG
if(!holding(&myproc()->lock))
panic("sched proc->lock");
#endif
if(mycpu()->ncli != 1)
panic("sched locks");
if(get_proc_state(myproc()) == RUNNING)
panic("sched running");
if(readrflags()&FL_IF)
panic("sched interruptible");
// Save interrupt-enable status to restore after we run again.
intena = mycpu()->intena;
// Charge the cycles consumed since this proc was last switched in.
myproc()->curcycles += rdtsc() - myproc()->tsc;
if (get_proc_state(myproc()) == ZOMBIE)
mtstop(myproc());
else
mtpause(myproc());
mtign();
// Jump into the per-CPU scheduler loop; returns when rescheduled.
swtch(&myproc()->context, mycpu()->scheduler);
mycpu()->intena = intena;
}
// Give up the CPU for one scheduling round.
void
yield(void)
......@@ -64,7 +37,7 @@ yield(void)
acquire(&myproc()->lock); //DOC: yieldlock
set_proc_state(myproc(), RUNNABLE);
sched();
sti();
//sti();
//release(&myproc()->lock);
}
......@@ -74,8 +47,8 @@ yield(void)
void
forkret(void)
{
sti();
//release(&myproc()->lock);
post_swtch();
// Just for the first process. can't do it earlier
// b/c file system code needs a process context
// in which to call cv_sleep().
......@@ -351,7 +324,6 @@ void
procdumpall(void)
{
static const char *states[] = {
/* [UNUSED] = */ "unused",
/* [EMBRYO] = */ "embryo",
/* [SLEEPING] = */ "sleep ",
/* [RUNNABLE] = */ "runble",
......@@ -500,8 +472,7 @@ wait(void)
void
threadhelper(void (*fn)(void *), void *arg)
{
sti();
//release(&myproc()->lock);
post_swtch();
mtstart(fn, myproc());
fn(arg);
exit();
......
......@@ -128,11 +128,29 @@ initsched(void)
int i;
for (i = 0; i < NCPU; i++) {
initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
STAILQ_INIT(&runq[i].q);
}
}
// Dump every CPU's run queue to the console: the CPU number on its
// own line, followed by the name of each queued process.
void
scheddump(void)
{
  for (int cpu = 0; cpu < NCPU; cpu++) {
    struct runq *rq = &runq[cpu];
    cprintf("%u\n", cpu);
    acquire(&rq->lock);
    struct proc *it;
    STAILQ_FOREACH(it, &rq->q, runqlink)
      cprintf(" %s\n", it->name);
    release(&rq->lock);
  }
}
#if 0
static int
migrate(struct proc *p)
......
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论