Commit 95fc055b authored by Nickolai Zeldovich

Merge branch 'scale-amd64' of git+ssh://pdos.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

@@ -19,6 +19,7 @@ struct cpu {
   struct cilkframe *cilkframe;
   int timer_printpc;
   atomic<u64> tlbflush_done;   // last tlb flush req done on this cpu
+  struct proc *prev;           // The previously-running process
   // Cpu-local storage variables; see below
   struct cpu *cpu;
......
@@ -125,6 +125,9 @@ void ideinit(void);
 void ideintr(void);
 void iderw(struct buf*);
+// idle.cc
+extern struct proc *idlep[NCPU];
 // ioapic.c
 void ioapicenable(int irq, int cpu);
@@ -174,7 +177,7 @@ int piperead(struct pipe*, char*, int);
 int pipewrite(struct pipe*, char*, int);
 // proc.c
-void addrun(struct proc *);
+struct proc* allocproc(void);
 struct proc* copyproc(struct proc*);
 void exit(void);
 int fork(int);
@@ -183,7 +186,6 @@ int kill(int);
 void pinit(void);
 void procdumpall(void);
 void scheduler(void) __noret__;
-void sched(void);
 void userinit(void);
 int wait(void);
 void yield(void);
@@ -201,6 +203,12 @@ int sampintr(struct trapframe*);
 void sampdump(void);
 void sampconf(void);
+// sched.cc
+void addrun(struct proc *);
+void sched(void);
+void post_swtch(void);
+void scheddump(void);
 // spinlock.c
 void acquire(struct spinlock*);
 int tryacquire(struct spinlock*);
@@ -242,8 +250,7 @@ void uartputc(char c);
 void uartintr(void);
 // vm.c
-void switchuvm(struct proc*);
-void switchkvm(void);
+void switchvm(struct proc*);
 int pagefault(struct vmap *, uptr, u32);
 // wq.c
......
@@ -130,5 +130,5 @@ struct taskstate
   (u32) ((u64)(rip)>>32), 0, \
 }
-#define PROC_KSTACK_OFFSET 40
+#define PROC_KSTACK_OFFSET 48
 #define TRAPFRAME_SIZE 192
@@ -34,14 +34,14 @@ struct mtrace_stacks {
 };
 #endif
-enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
+enum procstate { EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
 // Per-process state
 struct proc : public rcu_freed {
   struct vmap *vmap;       // va -> vma
   uptr brk;                // Top of heap
   char *kstack;            // Bottom of kernel stack for this process
-  enum procstate state;    // Process state
+  enum procstate _state;   // Process state
   volatile int pid;        // Process ID
   struct proc *parent;     // Parent process
   struct trapframe *tf;    // Trap frame for current syscall
@@ -74,7 +74,7 @@ struct proc : public rcu_freed {
   LIST_ENTRY(proc) cv_sleep;  // Linked list of processes sleeping on a cv
   proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
-    state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
+    _state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
     cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
     on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
   {
@@ -98,3 +98,36 @@ struct proc : public rcu_freed {
   virtual void do_gc() { delete this; }
   NEW_DELETE_OPS(proc)
 };
+
+static inline void
+set_proc_state(struct proc *p, enum procstate s)
+{
+  switch(p->_state) {
+  case EMBRYO:
+    if (s != RUNNABLE)
+      panic("EMBRYO -> %u", s);
+    break;
+  case SLEEPING:
+    if (s != RUNNABLE)
+      panic("SLEEPING -> %u", s);
+    break;
+  case RUNNABLE:
+    if (s != RUNNING && s != RUNNABLE)
+      panic("RUNNABLE -> %u", s);
+    break;
+  case RUNNING:
+    if (s != RUNNABLE && s != SLEEPING && s != ZOMBIE)
+      panic("RUNNING -> %u", s);
+    break;
+  case ZOMBIE:
+    panic("ZOMBIE -> %u", s);
+  }
+  p->_state = s;
+}
+
+static inline enum procstate
+get_proc_state(struct proc *p)
+{
+  return p->_state;
+}
@@ -2,3 +2,4 @@ void delrun(struct proc*);
 struct proc* schednext(void);
 int steal(void);
 void addrun(struct proc*);
@@ -13,6 +13,7 @@ OBJS = \
	file.o \
	fmt.o \
	fs.o \
+	idle.o \
	ioapic.o \
	lapic.o \
	hwvm.o \
......
@@ -36,7 +36,6 @@
 #include "queue.h"
 #include "proc.hh"
 #include "mtrace.h"
-#include "qlock.h"
 #define NSLOTS (1 << CILKSHIFT)
@@ -44,7 +43,7 @@ struct cilkqueue {
   struct cilkthread *thread[NSLOTS];
   volatile int head __mpalign__;
-  qlock_t lock;
+  struct spinlock lock;
   volatile int tail;
   __padout__;
 } __mpalign__;
@@ -65,8 +64,8 @@ struct cilkstat {
   __padout__;
 } __mpalign__;
-struct cilkqueue queue[NCPU] __mpalign__;
-struct cilkstat stat[NCPU] __mpalign__;
+static struct cilkqueue queue[NCPU] __mpalign__;
+static struct cilkstat stat[NCPU] __mpalign__;
 static struct cilkqueue *
 cilk_cur(void)
@@ -107,18 +106,17 @@ __cilk_push(struct cilkqueue *q, struct cilkthread *t)
 static struct cilkthread *
 __cilk_pop(struct cilkqueue *q)
 {
-  struct qnode qn;
   int i;
-  ql_lock(&q->lock, &qn);
+  acquire(&q->lock);
   i = q->head;
   if ((i - q->tail) == 0) {
-    ql_unlock(&q->lock, &qn);
+    release(&q->lock);
     return 0;
   }
   i = (i-1) & (NSLOTS-1);
   q->head--;
-  ql_unlock(&q->lock, &qn);
+  release(&q->lock);
   cilk_stat()->pop++;
   return q->thread[i];
@@ -127,18 +125,17 @@ __cilk_pop(struct cilkqueue *q)
 static struct cilkthread *
 __cilk_steal(struct cilkqueue *q)
 {
-  struct qnode qn;
   int i;
-  ql_lock(&q->lock, &qn);
+  acquire(&q->lock);
   i = q->tail;
   if ((i - q->head) == 0) {
-    ql_unlock(&q->lock, &qn);
+    release(&q->lock);
     return 0;
   }
   i = i & (NSLOTS-1);
   q->tail++;
-  ql_unlock(&q->lock, &qn);
+  release(&q->lock);
   cilk_stat()->steal++;
   return q->thread[i];
@@ -161,9 +158,8 @@ __cilk_run(struct cilkthread *th)
 // Guarantees some core will at some point execute the work.
 // The current core might execute the work immediately.
 void
-cilk_push(void *rip, u64 arg0, u64 arg1)
+cilk_push(void (*fn)(uptr, uptr), u64 arg0, u64 arg1)
 {
-  void (*fn)(uptr, uptr) = (void(*)(uptr,uptr))rip;
   struct cilkthread *th;
   th = (struct cilkthread *) kalloc();
@@ -171,7 +167,7 @@ cilk_push(void *rip, u64 arg0, u64 arg1)
     fn(arg0, arg1);
     return;
   }
-  th->rip = (uptr) rip;
+  th->rip = (uptr) fn;
   th->arg0 = arg0;
   th->arg1 = arg1;
   th->frame = cilk_frame();
@@ -281,7 +277,7 @@ testcilk(void)
   s = rdtsc();
   cilk_start();
   for (i = 0; i < iters; i++)
-    cilk_push((void*) __test_stub, i, i);
+    cilk_push(__test_stub, i, i);
   cilk_end();
   e = rdtsc();
   cprintf("testcilk: %lu\n", (e-s)/iters);
@@ -306,6 +302,6 @@ initcilk(void)
   int i;
   for (i = 0; i < NCPU; i++)
-    ql_init(&queue[i].lock, "queue lock");
+    initlock(&queue[i].lock, "queue lock", LOCKSTAT_CILK);
 }
 #endif // CILKENABLE
@@ -82,7 +82,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
   LIST_INSERT_HEAD(&cv->waiters, myproc(), cv_waiters);
   myproc()->oncv = cv;
-  myproc()->state = SLEEPING;
+  set_proc_state(myproc(), SLEEPING);
   if (timeout) {
     acquire(&sleepers_lock);
@@ -92,10 +92,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
   }
   release(&cv->lock);
   sched();
-  release(&myproc()->lock);
   // Reacquire original lock.
   acquire(lk);
 }
@@ -115,9 +112,9 @@ cv_wakeup(struct condvar *cv)
   acquire(&cv->lock);
   LIST_FOREACH_SAFE(p, &cv->waiters, cv_waiters, tmp) {
     acquire(&p->lock);
-    if (p->state != SLEEPING)
+    if (get_proc_state(p) != SLEEPING)
       panic("cv_wakeup: pid %u name %s state %u",
-            p->pid, p->name, p->state);
+            p->pid, p->name, get_proc_state(p));
     if (p->oncv != cv)
       panic("cv_wakeup: pid %u name %s p->cv %p cv %p",
             p->pid, p->name, p->oncv, cv);
......
@@ -213,7 +213,7 @@ exec(const char *path, char **argv)
   myproc()->brk = BRK + 8;  // XXX so that brk-1 points within heap vma..
   myproc()->tf->rip = elf.entry;  // main
-  switchuvm(myproc());
+  switchvm(myproc());
   oldvmap->decref();
   gc_end_epoch();
......
@@ -106,7 +106,7 @@ setupkshared(pgmap *pml4, char *kshared)
 // Switch h/w page table register to the kernel-only page table,
 // for when no process is running.
-void
+static void
 switchkvm(void)
 {
   lcr3(v2p(&kpml4));  // switch to the kernel page table
@@ -114,7 +114,7 @@ switchkvm(void)
 // Switch TSS and h/w page table to correspond to process p.
 void
-switchuvm(struct proc *p)
+switchvm(struct proc *p)
 {
   u64 base = (u64) &mycpu()->ts;
   pushcli();
@@ -124,9 +124,11 @@ switchuvm(struct proc *p)
   mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
   mycpu()->ts.iomba = (u16)offsetof(struct taskstate, iopb);
   ltr(TSSSEG);
-  if(p->vmap == 0 || p->vmap->pml4 == 0)
-    panic("switchuvm: no vmap/pml4");
-  lcr3(v2p(p->vmap->pml4));  // switch to new address space
+  if (p->vmap != 0 && p->vmap->pml4 != 0)
+    lcr3(v2p(p->vmap->pml4));  // switch to new address space
+  else
+    switchkvm();
   popcli();
 }
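Note: with switchuvm renamed to switchvm and the panic replaced by a fallback to switchkvm, kernel-only threads (p->vmap == 0), such as the per-CPU idle processes added below, go through the same path as user processes. A rough sketch of the call site in the reworked sched() (see the sched.cc hunk further down):

    mycpu()->proc = next;
    switchvm(next);               // loads next's pml4, or the kernel page table if it has no vmap
    set_proc_state(next, RUNNING);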
......
#include "types.h"
#include "kernel.hh"
#include "amd64.h"
#include "spinlock.h"
#include "condvar.h"
#include "proc.hh"
#include "cpu.hh"
#include "sched.hh"

struct proc *idlep[NCPU] __mpalign__;

void
idleloop(void)
{
  // Test the work queue
  //extern void testwq(void);
  //testwq();

  // Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
  // mtrace_call_set(1, cpu->id);
  //mtstart(scheduler, idlep);

  sti();
  for (;;) {
    acquire(&myproc()->lock);
    set_proc_state(myproc(), RUNNABLE);
    sched();
    if (steal() == 0) {
      int worked;
      do {
        assert(mycpu()->ncli == 0);
        worked = wq_trywork();
      } while(worked);
      sti();
    }
  }
}

void
initidle(void)
{
  // allocate a fake PID for each scheduler thread
  struct proc *p = allocproc();
  if (!p)
    panic("initidle allocproc");

  snprintf(p->name, sizeof(p->name), "idle_%u", cpunum());
  mycpu()->proc = p;
  myproc()->cpu_pin = 1;
  idlep[cpunum()] = p;
}
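Note: each CPU registers a pinned idle process in idlep[] and then spins in idleloop(), which the reworked sched() falls back to when the local run queue is empty. Roughly, the per-CPU boot path becomes (a sketch of the ordering shown in the main.cc hunks below, not a verbatim copy):

    initidle();    // allocproc() a pinned per-CPU idle proc, record it in idlep[cpunum()]
    ...
    idleloop();    // never returns: mark RUNNABLE, sched(), then steal() or wq_trywork()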
@@ -9,6 +9,9 @@
 #include "condvar.h"
 #include "proc.hh"
+extern void initidle(void);
+extern void idleloop(void);
 static volatile int bstate;
 void
@@ -18,9 +21,10 @@ mpboot(void)
   inittls();
   initlapic();
   initsamp();
+  initidle();
   initnmi();
   bstate = 1;
-  scheduler();  // start running processes
+  idleloop();
 }
 static void
@@ -90,6 +94,7 @@ cmain(u64 mbmagic, u64 mbaddr)
   initlockstat();
   initpci();
   initnet();
+  initidle();
   if (VERBOSE)
     cprintf("ncpu %d %lu MHz\n", ncpu, cpuhz / 1000000);
@@ -107,6 +112,7 @@ cmain(u64 mbmagic, u64 mbaddr)
     panic("TRAPFRAME_SIZE mismatch: %d %ld\n",
           TRAPFRAME_SIZE, sizeof(trapframe));
-  scheduler();
+  idleloop();
   panic("Unreachable");
 }
@@ -20,7 +20,7 @@ proc_hash(const u32 &p)
   return p;
 }
-int __mpalign__ idle[NCPU];
+//int __mpalign__ idle[NCPU];
 xns<u32, proc*, proc_hash> *xnspid __mpalign__;
 static struct proc *bootproc __mpalign__;
@@ -30,84 +30,22 @@ struct kstack_tag kstack_tag[NCPU];
 enum { sched_debug = 0 };
-void
-sched(void)
-{
-  int intena;
-
-#if SPINLOCK_DEBUG
-  if(!holding(&myproc()->lock))
-    panic("sched proc->lock");
-#endif
-  if(mycpu()->ncli != 1)
-    panic("sched locks");
-  if(myproc()->state == RUNNING)
-    panic("sched running");
-  if(readrflags()&FL_IF)
-    panic("sched interruptible");
-  intena = mycpu()->intena;
-  myproc()->curcycles += rdtsc() - myproc()->tsc;
-  if (myproc()->state == ZOMBIE)
-    mtstop(myproc());
-  else
-    mtpause(myproc());
-  mtign();
-  swtch(&myproc()->context, mycpu()->scheduler);
-  mycpu()->intena = intena;
-}
 // Give up the CPU for one scheduling round.
 void
 yield(void)
 {
   acquire(&myproc()->lock);  //DOC: yieldlock
-  myproc()->state = RUNNABLE;
+  set_proc_state(myproc(), RUNNABLE);
   sched();
-  release(&myproc()->lock);
 }
-static int
-migrate(struct proc *p)
-{
-  // p should not be running, or be on a runqueue, or be myproc()
-  int c;
-
-  if (p == myproc())
-    panic("migrate: myproc");
-
-  for (c = 0; c < ncpu; c++) {
-    if (c == mycpu()->id)
-      continue;
-    if (idle[c]) {  // OK if there is a race
-      acquire(&p->lock);
-      if (p->state == RUNNING)
-        panic("migrate: pid %u name %s is running",
-              p->pid, p->name);
-      if (p->cpu_pin)
-        panic("migrate: pid %u name %s is pinned",
-              p->pid, p->name);
-      p->curcycles = 0;
-      p->cpuid = c;
-      addrun(p);
-      idle[c] = 0;
-      release(&p->lock);
-      return 0;
-    }
-  }
-  return -1;
-}
 // A fork child's very first scheduling by scheduler()
 // will swtch here. "Return" to user space.
-static void
+void
 forkret(void)
 {
-  // Still holding proc->lock from scheduler.
-  release(&myproc()->lock);
+  post_swtch();
   // Just for the first process. can't do it earlier
   // b/c file system code needs a process context
@@ -153,7 +91,7 @@ exit(void)
   SLIST_FOREACH_SAFE(p, &(myproc()->childq), child_next, np) {
     acquire(&p->lock);
     p->parent = bootproc;
-    if(p->state == ZOMBIE)
+    if(get_proc_state(p) == ZOMBIE)
       wakeupinit = 1;
     SLIST_REMOVE(&(myproc()->childq), p, proc, child_next);
     release(&p->lock);
@@ -172,7 +110,7 @@ exit(void)
     cv_wakeup(&bootproc->cv);
   // Jump into the scheduler, never to return.
-  myproc()->state = ZOMBIE;
+  set_proc_state(myproc(), ZOMBIE);
   sched();
   panic("zombie exit");
 }
@@ -187,7 +125,7 @@ freeproc(struct proc *p)
 // If found, change state to EMBRYO and initialize
 // state required to run in the kernel.
 // Otherwise return 0.
-static struct proc*
+struct proc*
 allocproc(void)
 {
   struct proc *p;
@@ -276,104 +214,9 @@ inituser(void)
 void
 initproc(void)
 {
-  int c;
   xnspid = new xns<u32, proc*, proc_hash>(false);
   if (xnspid == 0)
     panic("pinit");
-  for (c = 0; c < NCPU; c++)
-    idle[c] = 1;
-}
-
-// Per-CPU process scheduler.
-// Each CPU calls scheduler() after setting itself up.
-// Scheduler never returns.  It loops, doing:
-//  - choose a process to run
-//  - swtch to start running that process
-//  - eventually that process transfers control
-//    via swtch back to the scheduler.
-void
-scheduler(void)
-{
-  // allocate a fake PID for each scheduler thread
-  struct proc *schedp = allocproc();
-  if (!schedp)
-    panic("scheduler allocproc");
-  snprintf(schedp->name, sizeof(schedp->name), "scheduler_%u", cpunum());
-  mycpu()->proc = schedp;
-  myproc()->cpu_pin = 1;
-
-  // Test the work queue
-  //extern void testwq(void);
-  //testwq();
-
-  // Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
-  // mtrace_call_set(1, cpu->id);
-  mtstart(scheduler, schedp);
-
-  for(;;){
-    // Enable interrupts on this processor.
-    sti();
-
-    struct proc *p = schednext();
-    if (p) {
-      acquire(&p->lock);
-      if (p->state != RUNNABLE) {
-        release(&p->lock);
-      } else {
-        if (idle[mycpu()->id])
-          idle[mycpu()->id] = 0;
-
-        // Switch to chosen process.  It is the process's job
-        // to release proc->lock and then reacquire it
-        // before jumping back to us.
-        mycpu()->proc = p;
-        switchuvm(p);
-        p->state = RUNNING;
-        p->tsc = rdtsc();
-
-        mtpause(schedp);
-        if (p->context->rip != (uptr)forkret &&
-            p->context->rip != (uptr)threadstub)
-        {
-          mtresume(p);
-        }
-        mtrec();
-
-        swtch(&mycpu()->scheduler, myproc()->context);
-        mtresume(schedp);
-        mtign();
-        switchkvm();
-
-        // Process is done running for now.
-        // It should have changed its p->state before coming back.
-        mycpu()->proc = schedp;
-        if (p->state != RUNNABLE)
-          delrun(p);
-        release(&p->lock);
-      }
-    } else {
-      if (steal()) {
-        if (idle[mycpu()->id])
-          idle[mycpu()->id] = 0;
-      } else {
-        if (!idle[mycpu()->id])
-          idle[mycpu()->id] = 1;
-      }
-    }
-
-    if (idle[mycpu()->id]) {
-      int worked;
-      do {
-        assert(mycpu()->ncli == 0);
-        worked = wq_trywork();
-      } while(worked);
-      sti();
-    }
-  }
 }
 // Grow/shrink current process's memory by n bytes.
@@ -456,7 +299,7 @@ kill(int pid)
   }
   acquire(&p->lock);
   p->killed = 1;
-  if(p->state == SLEEPING){
+  if(get_proc_state(p) == SLEEPING){
     // XXX
     // we need to wake p up if it is cv_sleep()ing.
     // can't change p from SLEEPING to RUNNABLE since that
@@ -479,7 +322,6 @@ void
 procdumpall(void)
 {
   static const char *states[] = {
-    /* [UNUSED] = */ "unused",
     /* [EMBRYO] = */ "embryo",
     /* [SLEEPING] = */ "sleep ",
     /* [RUNNABLE] = */ "runble",
@@ -491,8 +333,9 @@ procdumpall(void)
   uptr pc[10];
   for (proc *p : xnspid) {
-    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
-      state = states[p->state];
+    if(get_proc_state(p) >= 0 && get_proc_state(p) < NELEM(states) &&
+       states[get_proc_state(p)])
+      state = states[get_proc_state(p)];
     else
       state = "???";
@@ -502,7 +345,7 @@ procdumpall(void)
     cprintf("\n%-3d %-10s %8s %2u %lu\n",
             p->pid, name, state, p->cpuid, p->tsc);
-    if(p->state == SLEEPING){
+    if(get_proc_state(p) == SLEEPING){
       getcallerpcs((void*)p->context->rbp, pc, NELEM(pc));
       for(int i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %lx\n", pc[i]);
@@ -531,7 +374,6 @@ fork(int flags)
   if((np->vmap = myproc()->vmap->copy(cow)) == 0){
     ksfree(slab_stack, np->kstack);
     np->kstack = 0;
-    np->state = UNUSED;
     if (!xnspid->remove(np->pid, &np))
       panic("fork: ns_remove");
     freeproc(np);
@@ -559,14 +401,11 @@ fork(int flags)
   SLIST_INSERT_HEAD(&myproc()->childq, np, child_next);
   release(&myproc()->lock);
-  if (migrate(np)) {
   acquire(&np->lock);
   np->cpuid = mycpu()->id;
   addrun(np);
   release(&np->lock);
-  }
-  // cprintf("%d: fork done (pid %d)\n", myproc()->pid, pid);
   return pid;
 }
@@ -585,7 +424,7 @@ wait(void)
   SLIST_FOREACH_SAFE(p, &myproc()->childq, child_next, np) {
     havekids = 1;
     acquire(&p->lock);
-    if(p->state == ZOMBIE){
+    if(get_proc_state(p) == ZOMBIE){
       release(&p->lock);  // noone else better be trying to lock p
       pid = p->pid;
       SLIST_REMOVE(&myproc()->childq, p, proc, child_next);
@@ -593,7 +432,6 @@ wait(void)
       ksfree(slab_stack, p->kstack);
       p->kstack = 0;
       p->vmap->decref();
-      p->state = UNUSED;
      if (!xnspid->remove(p->pid, &p))
        panic("wait: ns_remove");
      p->pid = 0;
@@ -622,7 +460,7 @@ wait(void)
 void
 threadhelper(void (*fn)(void *), void *arg)
 {
-  release(&myproc()->lock);  // initially held by scheduler
+  post_swtch();
   mtstart(fn, myproc());
   fn(arg);
   exit();
@@ -664,7 +502,6 @@ threadpin(void (*fn)(void*), void *arg, const char *name, int cpu)
   p->cpuid = cpu;
   p->cpu_pin = 1;
   acquire(&p->lock);
-  p->state = RUNNABLE;
   addrun(p);
   release(&p->lock);
 }
@@ -24,12 +24,82 @@ struct runq {
 static struct runq runq[NCPU] __mpalign__;
 void
+post_swtch(void)
+{
+  if (get_proc_state(mycpu()->prev) == RUNNABLE &&
+      mycpu()->prev != idlep[mycpu()->id])
+    addrun(mycpu()->prev);
+  release(&mycpu()->prev->lock);
+}
+
+void
+sched(void)
+{
+  extern void threadstub(void);
+  extern void forkret(void);
+  int intena;
+
+#if SPINLOCK_DEBUG
+  if(!holding(&myproc()->lock))
+    panic("sched proc->lock");
+#endif
+  if(mycpu()->ncli != 1)
+    panic("sched locks");
+  if(get_proc_state(myproc()) == RUNNING)
+    panic("sched running");
+  if(readrflags()&FL_IF)
+    panic("sched interruptible");
+  intena = mycpu()->intena;
+  myproc()->curcycles += rdtsc() - myproc()->tsc;
+  if (get_proc_state(myproc()) == ZOMBIE)
+    mtstop(myproc());
+  else
+    mtpause(myproc());
+  mtign();
+
+  struct proc *next = schednext();
+  if (next == nullptr) {
+    if (get_proc_state(myproc()) != RUNNABLE) {
+      next = idlep[mycpu()->id];
+    } else {
+      set_proc_state(myproc(), RUNNING);
+      mycpu()->intena = intena;
+      release(&myproc()->lock);
+      return;
+    }
+  }
+
+  if (get_proc_state(next) != RUNNABLE)
+    panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
+
+  struct proc *prev = myproc();
+  mycpu()->proc = next;
+  mycpu()->prev = prev;
+  switchvm(next);
+  set_proc_state(next, RUNNING);
+  next->tsc = rdtsc();
+
+  mtpause(next);
+  if (next->context->rip != (uptr)forkret &&
+      next->context->rip != (uptr)threadstub)
+  {
+    mtresume(next);
+  }
+  mtrec();
+
+  swtch(&prev->context, next->context);
+  mycpu()->intena = intena;
+  post_swtch();
+}
+
+void
 addrun(struct proc *p)
 {
   // Always called with p->lock held
   struct runq *q;
-  p->state = RUNNABLE;
+  set_proc_state(p, RUNNABLE);
   q = &runq[p->cpuid];
   acquire(&q->lock);
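Note: the lock handoff above replaces the old dedicated scheduler thread. A thread enters sched() holding its own proc->lock; whoever runs next drops that lock in post_swtch() (forkret() and threadhelper() now start with post_swtch() for the same reason). A minimal sketch of the yield path under this scheme (illustrative; yield() itself is in the proc.cc hunk above):

    acquire(&myproc()->lock);
    set_proc_state(myproc(), RUNNABLE);
    sched();            // no release here: the next thread's post_swtch()
                        // (or sched()'s early-return path) drops this lock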
@@ -71,9 +141,10 @@ steal(void)
     if (tryacquire(&q->lock) == 0)
       continue;
     STAILQ_FOREACH(p, &q->q, runqlink) {
-      if (p->state == RUNNABLE && !p->cpu_pin &&
+      if (get_proc_state(p) == RUNNABLE && !p->cpu_pin &&
           p->curcycles != 0 && p->curcycles > VICTIMAGE)
       {
+        STAILQ_REMOVE(&q->q, p, proc, runqlink);
         steal = p;
         break;
       }
@@ -82,10 +153,9 @@ steal(void)
     if (steal) {
       acquire(&steal->lock);
-      if (steal->state == RUNNABLE && !steal->cpu_pin &&
+      if (get_proc_state(steal) == RUNNABLE && !steal->cpu_pin &&
           steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
       {
-        delrun(steal);
         steal->curcycles = 0;
         steal->cpuid = mycpu()->id;
         addrun(steal);
@@ -93,6 +163,8 @@ steal(void)
         r = 1;
         break;
       }
+      if (get_proc_state(steal) == RUNNABLE)
+        addrun(steal);
       release(&steal->lock);
     }
   }
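Note: steal() now dequeues the candidate from the victim's run queue while holding the queue lock, then re-validates it under the process lock. A sketch of the resulting flow (not a line-for-line copy of the file):

    // under runq[victim].lock:
    //   RUNNABLE, unpinned, curcycles > VICTIMAGE -> STAILQ_REMOVE from the victim queue
    // then, under steal->lock:
    //   still eligible       -> steal->cpuid = mycpu()->id; addrun(steal)  // now on our queue
    //   else, still RUNNABLE -> addrun(steal)                              // put back on its own queue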
@@ -112,10 +184,8 @@ schednext(void)
   q = &runq[mycpu()->id];
   acquire(&q->lock);
   p = STAILQ_LAST(&q->q, proc, runqlink);
-  if (p) {
+  if (p)
     STAILQ_REMOVE(&q->q, p, proc, runqlink);
-    STAILQ_INSERT_HEAD(&q->q, p, runqlink);
-  }
   release(&q->lock);
   popcli();
   return p;
@@ -131,3 +201,39 @@ initsched(void)
     STAILQ_INIT(&runq[i].q);
   }
 }
+
+#if 0
+static int
+migrate(struct proc *p)
+{
+  // p should not be running, or be on a runqueue, or be myproc()
+  int c;
+
+  if (p == myproc())
+    panic("migrate: myproc");
+
+  for (c = 0; c < ncpu; c++) {
+    if (c == mycpu()->id)
+      continue;
+    if (idle[c]) {  // OK if there is a race
+      acquire(&p->lock);
+      if (p->state == RUNNING)
+        panic("migrate: pid %u name %s is running",
+              p->pid, p->name);
+      if (p->cpu_pin)
+        panic("migrate: pid %u name %s is pinned",
+              p->pid, p->name);
+      p->curcycles = 0;
+      p->cpuid = c;
+      addrun(p);
+      idle[c] = 0;
+      release(&p->lock);
+      return 0;
+    }
+  }
+  return -1;
+}
+#endif
@@ -179,7 +179,7 @@ trap(struct trapframe *tf)
   // Force process to give up CPU on clock tick.
   // If interrupts were on while locks held, would need to check nlock.
-  if(myproc() && myproc()->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
+  if(myproc() && get_proc_state(myproc()) == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
     yield();
   // Check if the process has been killed since we yielded
......
@@ -220,7 +220,7 @@ sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
   safestrcpy(p->name, name, sizeof(p->name));
   acquire(&p->lock);
-  p->state = RUNNABLE;
+  set_proc_state(p, RUNNABLE);
   addrun(p);
   release(&p->lock);
......