Some scheduling cleanup

Parent a149ec86
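
The declarations gathered under the new // sched.cc block of kernel.hh (addrun, sched, post_swtch) all revolve around one hand-off: sched() picks the next process (from schednext(), from the per-CPU idle process in idlep[], or it keeps running the current one), records the outgoing process in mycpu()->prev, and swtch()es away while still holding that process's lock; whatever runs next on this CPU — the tail of sched() after swtch() returns, or presumably a fresh thread entering through forkret()/threadstub() — calls post_swtch() to requeue and unlock it. Below is a minimal, self-contained sketch of that protocol; the types and helpers are simplified stand-ins (a bool for struct spinlock, a plain call instead of swtch()), not the kernel's real definitions.

#include <cassert>

// Hypothetical stand-ins for the kernel types; only the fields the
// hand-off protocol touches are modeled here.
enum procstate { RUNNABLE, RUNNING, ZOMBIE };

struct proc {
  bool locked = false;      // stand-in for struct spinlock lock
  procstate state = RUNNABLE;
};

struct cpu {
  proc *curproc = nullptr;  // currently running process
  proc *prev = nullptr;     // previously running process (struct cpu::prev)
};

static cpu cpu0;
static proc idleproc;       // stand-in for idlep[mycpu()->id]

static void addrun(proc *p) {
  assert(p->locked);        // addrun() is always called with p->lock held
  // ...enqueue p on a run queue...
}

// Same shape as the new post_swtch(): requeue the previous process if it is
// still RUNNABLE and is not the idle process, then drop the lock that
// sched() left held across the switch.
static void post_swtch() {
  if (cpu0.prev->state == RUNNABLE && cpu0.prev != &idleproc)
    addrun(cpu0.prev);
  cpu0.prev->locked = false;
}

int main() {
  proc a, b;

  // sched() runs with the outgoing process's lock held, records it as
  // cpu->prev, and swtch()es away without releasing that lock.
  a.locked = true;
  a.state = RUNNABLE;
  cpu0.curproc = &b;
  cpu0.prev = &a;

  // Whatever runs next on this CPU -- the tail of sched() after swtch()
  // returns, or (presumably) a new thread entering through forkret() or
  // threadstub() -- must begin with post_swtch() so a gets requeued and
  // unlocked.
  post_swtch();
  assert(!a.locked);
  return 0;
}
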
@@ -19,8 +19,7 @@ struct cpu {
struct cilkframe *cilkframe;
int timer_printpc;
atomic<u64> tlbflush_done; // last tlb flush req done on this cpu
struct proc *prev; // The previously-running process
struct proc *prev;
// Cpu-local storage variables; see below
struct cpu *cpu;
...
@@ -125,6 +125,9 @@ void ideinit(void);
void ideintr(void);
void iderw(struct buf*);
// idle.cc
extern struct proc *idlep[NCPU];
// ioapic.c
void ioapicenable(int irq, int cpu);
@@ -174,7 +177,6 @@ int piperead(struct pipe*, char*, int);
int pipewrite(struct pipe*, char*, int);
// proc.c
void addrun(struct proc *);
struct proc* allocproc(void);
struct proc* copyproc(struct proc*);
void exit(void);
@@ -190,11 +192,6 @@ void yield(void);
struct proc* threadalloc(void (*fn)(void*), void *arg);
void threadpin(void (*fn)(void*), void *arg, const char *name, int cpu);
// XXX
void sched(void);
void post_swtch(void);
void scheddump(void);
// prof.c
extern int profenable;
void profreset(void);
@@ -206,6 +203,12 @@ int sampintr(struct trapframe*);
void sampdump(void);
void sampconf(void);
// sched.cc
void addrun(struct proc *);
void sched(void);
void post_swtch(void);
void scheddump(void);
// spinlock.c
void acquire(struct spinlock*);
int tryacquire(struct spinlock*);
...
@@ -2,3 +2,4 @@ void delrun(struct proc*);
struct proc* schednext(void);
int steal(void);
void addrun(struct proc*);
@@ -92,10 +92,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
}
release(&cv->lock);
sched();
//sti();
// Reacquire original lock.
acquire(lk);
}
...
@@ -245,7 +245,6 @@ consoleintr(int (*getc)(void))
switch(c){
case C('P'): // Process listing.
procdumpall();
scheddump();
break;
case C('E'): // Print user-space PCs.
for (u32 i = 0; i < NCPU; i++)
...
#include "types.h"
#include "kernel.hh"
#include "mmu.h"
#include "amd64.h" #include "amd64.h"
#include "spinlock.h" #include "spinlock.h"
#include "condvar.h" #include "condvar.h"
#include "queue.h"
#include "proc.hh" #include "proc.hh"
#include "cpu.hh" #include "cpu.hh"
#include "bits.hh"
#include "kmtrace.hh"
#include "sched.hh" #include "sched.hh"
#include "kalloc.hh"
#include "vm.hh"
#include "ns.hh"
static struct proc *the_idle[NCPU] __mpalign__;
struct proc *idlep[NCPU] __mpalign__;
extern void
forkret(void);

void
post_swtch(void)
{
  if (get_proc_state(mycpu()->prev) == RUNNABLE &&
      mycpu()->prev != the_idle[mycpu()->id])
    addrun(mycpu()->prev);
  release(&mycpu()->prev->lock);
  popcli();
}
void
sched(void)
{
  int intena;

#if SPINLOCK_DEBUG
  if(!holding(&myproc()->lock))
    panic("sched proc->lock");
#endif
  if(mycpu()->ncli != 1)
    panic("sched locks");
  if(get_proc_state(myproc()) == RUNNING)
    panic("sched running");
  if(readrflags()&FL_IF)
    panic("sched interruptible");

  intena = mycpu()->intena;
  myproc()->curcycles += rdtsc() - myproc()->tsc;
  if (get_proc_state(myproc()) == ZOMBIE)
    mtstop(myproc());
  else
    mtpause(myproc());
  mtign();

  struct proc *next = schednext();
  if (next) {
 switchit:
    pushcli();
    if (get_proc_state(next) != RUNNABLE)
      panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
    // Switch to chosen process. It is the process's job
    // to release proc->lock and then reacquire it
    // before jumping back to us.
    struct proc *prev = myproc();
    mycpu()->proc = next;
    mycpu()->prev = prev;
    switchuvm(next);
    set_proc_state(next, RUNNING);
    next->tsc = rdtsc();
    mtpause(next);
    if (next->context->rip != (uptr)forkret &&
        next->context->rip != (uptr)threadstub)
    {
      mtresume(next);
    }
    mtrec();
    swtch(&prev->context, next->context);
    mycpu()->intena = intena;
    post_swtch();
  } else if (get_proc_state(myproc()) != RUNNABLE) {
    next = the_idle[mycpu()->id];
    goto switchit;
  } else {
    set_proc_state(myproc(), RUNNING);
    mycpu()->intena = intena;
    release(&myproc()->lock);
  }

  //swtch(&myproc()->context, mycpu()->scheduler);
  //mycpu()->intena = intena;
}
void
idleloop(void)
@@ -105,7 +20,6 @@ idleloop(void)
// mtrace_call_set(1, cpu->id);
//mtstart(scheduler, idlep);
sti();
for (;;) {
acquire(&myproc()->lock);
@@ -134,5 +48,5 @@ initidle(void)
snprintf(p->name, sizeof(p->name), "idle_%u", cpunum());
mycpu()->proc = p;
myproc()->cpu_pin = 1;
the_idle[cpunum()] = p;
idlep[cpunum()] = p;
}
@@ -24,6 +24,76 @@ struct runq {
static struct runq runq[NCPU] __mpalign__;
void
post_swtch(void)
{
  if (get_proc_state(mycpu()->prev) == RUNNABLE &&
      mycpu()->prev != idlep[mycpu()->id])
    addrun(mycpu()->prev);
  release(&mycpu()->prev->lock);
}
void
sched(void)
{
  extern void threadstub(void);
  extern void forkret(void);
  int intena;

#if SPINLOCK_DEBUG
  if(!holding(&myproc()->lock))
    panic("sched proc->lock");
#endif
  if(mycpu()->ncli != 1)
    panic("sched locks");
  if(get_proc_state(myproc()) == RUNNING)
    panic("sched running");
  if(readrflags()&FL_IF)
    panic("sched interruptible");

  intena = mycpu()->intena;
  myproc()->curcycles += rdtsc() - myproc()->tsc;
  if (get_proc_state(myproc()) == ZOMBIE)
    mtstop(myproc());
  else
    mtpause(myproc());
  mtign();

  struct proc *next = schednext();
  if (next == nullptr) {
    if (get_proc_state(myproc()) != RUNNABLE) {
      next = idlep[mycpu()->id];
    } else {
      set_proc_state(myproc(), RUNNING);
      mycpu()->intena = intena;
      release(&myproc()->lock);
      return;
    }
  }

  if (get_proc_state(next) != RUNNABLE)
    panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));

  struct proc *prev = myproc();
  mycpu()->proc = next;
  mycpu()->prev = prev;
  switchuvm(next);
  set_proc_state(next, RUNNING);
  next->tsc = rdtsc();
  mtpause(next);
  if (next->context->rip != (uptr)forkret &&
      next->context->rip != (uptr)threadstub)
  {
    mtresume(next);
  }
  mtrec();
  swtch(&prev->context, next->context);
  mycpu()->intena = intena;
  post_swtch();
}
void
addrun(struct proc *p)
{
// Always called with p->lock held
@@ -86,7 +156,6 @@ steal(void)
if (get_proc_state(steal) == RUNNABLE && !steal->cpu_pin &&
steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
{
//delrun(steal);
steal->curcycles = 0;
steal->cpuid = mycpu()->id;
addrun(steal);
@@ -133,24 +202,6 @@ initsched(void)
}
}
void
scheddump(void)
{
  struct proc *p;
  int i;

  for (i = 0; i < NCPU; i++) {
    struct runq *q = &runq[i];
    cprintf("%u\n", i);
    acquire(&q->lock);
    STAILQ_FOREACH(p, &q->q, runqlink) {
      cprintf(" %s\n", p->name);
    }
    release(&q->lock);
  }
}
#if 0
static int
migrate(struct proc *p)
...
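
The victim test that steal() keeps above can be read in isolation: a process is only worth migrating if it is RUNNABLE, not pinned to its CPU, and its curcycles (charged from the TSC in sched()) exceed VICTIMAGE. A tiny stand-alone sketch of that predicate follows; the types and the threshold value are placeholders for illustration, not the kernel's real definitions from proc.hh and sched.cc.

#include <cstdint>

// Placeholder types and threshold; the real proc and VICTIMAGE live in the kernel.
enum procstate { RUNNABLE, RUNNING, ZOMBIE };
static const uint64_t VICTIMAGE = 1000000;   // assumed value, for illustration only

struct proc {
  procstate state;
  int cpu_pin;          // nonzero when the process may not leave its CPU
  uint64_t curcycles;   // cycles charged to the process in sched()
};

// Mirrors the condition in steal(): only a runnable, unpinned process that
// has accumulated more than VICTIMAGE cycles is eligible for migration.
static bool eligible_victim(const proc &p)
{
  return p.state == RUNNABLE && !p.cpu_pin &&
         p.curcycles != 0 && p.curcycles > VICTIMAGE;
}

int main()
{
  proc p { RUNNABLE, 0, 2 * VICTIMAGE };
  return eligible_victim(p) ? 0 : 1;
}
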