Commit 95fc055b authored by Nickolai Zeldovich

Merge branch 'scale-amd64' of git+ssh://pdos.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

@@ -19,6 +19,7 @@ struct cpu {
struct cilkframe *cilkframe;
int timer_printpc;
atomic<u64> tlbflush_done; // last tlb flush req done on this cpu
struct proc *prev; // The previously-running process
// Cpu-local storage variables; see below
struct cpu *cpu;
@@ -125,6 +125,9 @@ void ideinit(void);
void ideintr(void);
void iderw(struct buf*);
// idle.cc
extern struct proc *idlep[NCPU];
// ioapic.c
void ioapicenable(int irq, int cpu);
@@ -174,7 +177,7 @@ int piperead(struct pipe*, char*, int);
int pipewrite(struct pipe*, char*, int);
// proc.c
void addrun(struct proc *);
struct proc* allocproc(void);
struct proc* copyproc(struct proc*);
void exit(void);
int fork(int);
@@ -183,7 +186,6 @@ int kill(int);
void pinit(void);
void procdumpall(void);
void scheduler(void) __noret__;
void sched(void);
void userinit(void);
int wait(void);
void yield(void);
@@ -201,6 +203,12 @@ int sampintr(struct trapframe*);
void sampdump(void);
void sampconf(void);
// sched.cc
void addrun(struct proc *);
void sched(void);
void post_swtch(void);
void scheddump(void);
// spinlock.c
void acquire(struct spinlock*);
int tryacquire(struct spinlock*);
@@ -242,8 +250,7 @@ void uartputc(char c);
void uartintr(void);
// vm.c
void switchuvm(struct proc*);
void switchkvm(void);
void switchvm(struct proc*);
int pagefault(struct vmap *, uptr, u32);
// wq.c
@@ -130,5 +130,5 @@ struct taskstate
(u32) ((u64)(rip)>>32), 0, \
}
#define PROC_KSTACK_OFFSET 40
#define PROC_KSTACK_OFFSET 48
#define TRAPFRAME_SIZE 192
@@ -34,14 +34,14 @@ struct mtrace_stacks {
};
#endif
enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
enum procstate { EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
// Per-process state
struct proc : public rcu_freed {
struct vmap *vmap; // va -> vma
uptr brk; // Top of heap
char *kstack; // Bottom of kernel stack for this process
enum procstate state; // Process state
enum procstate _state; // Process state
volatile int pid; // Process ID
struct proc *parent; // Parent process
struct trapframe *tf; // Trap frame for current syscall
@@ -74,7 +74,7 @@ struct proc : public rcu_freed {
LIST_ENTRY(proc) cv_sleep; // Linked list of processes sleeping on a cv
proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
_state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
{
@@ -98,3 +98,36 @@ struct proc : public rcu_freed {
virtual void do_gc() { delete this; }
NEW_DELETE_OPS(proc)
};
static inline void
set_proc_state(struct proc *p, enum procstate s)
{
switch(p->_state) {
case EMBRYO:
if (s != RUNNABLE)
panic("EMBRYO -> %u", s);
break;
case SLEEPING:
if (s != RUNNABLE)
panic("SLEEPING -> %u", s);
break;
case RUNNABLE:
if (s != RUNNING && s != RUNNABLE)
panic("RUNNABLE -> %u", s);
break;
case RUNNING:
if (s != RUNNABLE && s != SLEEPING && s != ZOMBIE)
panic("RUNNING -> %u", s);
break;
case ZOMBIE:
panic("ZOMBIE -> %u", s);
}
p->_state = s;
}
static inline enum procstate
get_proc_state(struct proc *p)
{
return p->_state;
}
@@ -2,3 +2,4 @@ void delrun(struct proc*);
struct proc* schednext(void);
int steal(void);
void addrun(struct proc*);
@@ -13,6 +13,7 @@ OBJS = \
file.o \
fmt.o \
fs.o \
idle.o \
ioapic.o \
lapic.o \
hwvm.o \
@@ -36,7 +36,6 @@
#include "queue.h"
#include "proc.hh"
#include "mtrace.h"
#include "qlock.h"
#define NSLOTS (1 << CILKSHIFT)
@@ -44,7 +43,7 @@ struct cilkqueue {
struct cilkthread *thread[NSLOTS];
volatile int head __mpalign__;
qlock_t lock;
struct spinlock lock;
volatile int tail;
__padout__;
} __mpalign__;
@@ -65,8 +64,8 @@ struct cilkstat {
__padout__;
} __mpalign__;
struct cilkqueue queue[NCPU] __mpalign__;
struct cilkstat stat[NCPU] __mpalign__;
static struct cilkqueue queue[NCPU] __mpalign__;
static struct cilkstat stat[NCPU] __mpalign__;
static struct cilkqueue *
cilk_cur(void)
@@ -107,18 +106,17 @@ __cilk_push(struct cilkqueue *q, struct cilkthread *t)
static struct cilkthread *
__cilk_pop(struct cilkqueue *q)
{
struct qnode qn;
int i;
ql_lock(&q->lock, &qn);
acquire(&q->lock);
i = q->head;
if ((i - q->tail) == 0) {
ql_unlock(&q->lock, &qn);
release(&q->lock);
return 0;
}
i = (i-1) & (NSLOTS-1);
q->head--;
ql_unlock(&q->lock, &qn);
release(&q->lock);
cilk_stat()->pop++;
return q->thread[i];
@@ -127,18 +125,17 @@ __cilk_pop(struct cilkqueue *q)
static struct cilkthread *
__cilk_steal(struct cilkqueue *q)
{
struct qnode qn;
int i;
ql_lock(&q->lock, &qn);
acquire(&q->lock);
i = q->tail;
if ((i - q->head) == 0) {
ql_unlock(&q->lock, &qn);
release(&q->lock);
return 0;
}
i = i & (NSLOTS-1);
q->tail++;
ql_unlock(&q->lock, &qn);
release(&q->lock);
cilk_stat()->steal++;
return q->thread[i];
@@ -161,9 +158,8 @@ __cilk_run(struct cilkthread *th)
// Guarantees some core will at some point execute the work.
// The current core might execute the work immediately.
void
cilk_push(void *rip, u64 arg0, u64 arg1)
cilk_push(void (*fn)(uptr, uptr), u64 arg0, u64 arg1)
{
void (*fn)(uptr, uptr) = (void(*)(uptr,uptr))rip;
struct cilkthread *th;
th = (struct cilkthread *) kalloc();
@@ -171,7 +167,7 @@ cilk_push(void *rip, u64 arg0, u64 arg1)
fn(arg0, arg1);
return;
}
th->rip = (uptr) rip;
th->rip = (uptr) fn;
th->arg0 = arg0;
th->arg1 = arg1;
th->frame = cilk_frame();
@@ -281,7 +277,7 @@ testcilk(void)
s = rdtsc();
cilk_start();
for (i = 0; i < iters; i++)
cilk_push((void*) __test_stub, i, i);
cilk_push(__test_stub, i, i);
cilk_end();
e = rdtsc();
cprintf("testcilk: %lu\n", (e-s)/iters);
@@ -306,6 +302,6 @@ initcilk(void)
int i;
for (i = 0; i < NCPU; i++)
ql_init(&queue[i].lock, "queue lock");
initlock(&queue[i].lock, "queue lock", LOCKSTAT_CILK);
}
#endif // CILKENABLE
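cilk_push() now takes a typed function pointer, void (*)(uptr, uptr), instead of a bare void *rip, so a worker with the wrong signature is rejected at compile time and call sites such as testcilk() no longer cast. A stand-alone sketch of the new call style; the cilk_push below is a stand-in that just runs the work inline, not the kernel's queueing implementation:

#include <cstdint>
#include <cstdio>

typedef std::uintptr_t uptr;   // stand-in for the kernel's uptr

// Stand-in for the kernel's cilk_push(): runs the work immediately.
static void cilk_push(void (*fn)(uptr, uptr), uptr arg0, uptr arg1) { fn(arg0, arg1); }

static void worker(uptr a, uptr b) { std::printf("work %lu %lu\n", (unsigned long)a, (unsigned long)b); }

int main()
{
  cilk_push(worker, 1, 2);             // matches void (*)(uptr, uptr): compiles
  // cilk_push((void *)worker, 1, 2);  // old void* style: now a type error
  return 0;
}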
@@ -82,7 +82,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
LIST_INSERT_HEAD(&cv->waiters, myproc(), cv_waiters);
myproc()->oncv = cv;
myproc()->state = SLEEPING;
set_proc_state(myproc(), SLEEPING);
if (timeout) {
acquire(&sleepers_lock);
@@ -92,10 +92,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
}
release(&cv->lock);
sched();
release(&myproc()->lock);
// Reacquire original lock.
acquire(lk);
}
@@ -115,9 +112,9 @@ cv_wakeup(struct condvar *cv)
acquire(&cv->lock);
LIST_FOREACH_SAFE(p, &cv->waiters, cv_waiters, tmp) {
acquire(&p->lock);
if (p->state != SLEEPING)
if (get_proc_state(p) != SLEEPING)
panic("cv_wakeup: pid %u name %s state %u",
p->pid, p->name, p->state);
p->pid, p->name, get_proc_state(p));
if (p->oncv != cv)
panic("cv_wakeup: pid %u name %s p->cv %p cv %p",
p->pid, p->name, p->oncv, cv);
@@ -213,7 +213,7 @@ exec(const char *path, char **argv)
myproc()->brk = BRK + 8; // XXX so that brk-1 points within heap vma..
myproc()->tf->rip = elf.entry; // main
switchuvm(myproc());
switchvm(myproc());
oldvmap->decref();
gc_end_epoch();
@@ -106,7 +106,7 @@ setupkshared(pgmap *pml4, char *kshared)
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
static void
switchkvm(void)
{
lcr3(v2p(&kpml4)); // switch to the kernel page table
@@ -114,7 +114,7 @@ switchkvm(void)
// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
switchvm(struct proc *p)
{
u64 base = (u64) &mycpu()->ts;
pushcli();
@@ -124,9 +124,11 @@ switchuvm(struct proc *p)
mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
mycpu()->ts.iomba = (u16)offsetof(struct taskstate, iopb);
ltr(TSSSEG);
if(p->vmap == 0 || p->vmap->pml4 == 0)
panic("switchuvm: no vmap/pml4");
lcr3(v2p(p->vmap->pml4)); // switch to new address space
if (p->vmap != 0 && p->vmap->pml4 != 0)
lcr3(v2p(p->vmap->pml4)); // switch to new address space
else
switchkvm();
popcli();
}
#include "types.h"
#include "kernel.hh"
#include "amd64.h"
#include "spinlock.h"
#include "condvar.h"
#include "proc.hh"
#include "cpu.hh"
#include "sched.hh"
struct proc *idlep[NCPU] __mpalign__;
void
idleloop(void)
{
// Test the work queue
//extern void testwq(void);
//testwq();
// Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
// mtrace_call_set(1, cpu->id);
//mtstart(scheduler, idlep);
sti();
for (;;) {
acquire(&myproc()->lock);
set_proc_state(myproc(), RUNNABLE);
sched();
if (steal() == 0) {
int worked;
do {
assert(mycpu()->ncli == 0);
worked = wq_trywork();
} while(worked);
sti();
}
}
}
void
initidle(void)
{
// allocate a fake PID for each scheduler thread
struct proc *p = allocproc();
if (!p)
panic("initidle allocproc");
snprintf(p->name, sizeof(p->name), "idle_%u", cpunum());
mycpu()->proc = p;
myproc()->cpu_pin = 1;
idlep[cpunum()] = p;
}
@@ -9,6 +9,9 @@
#include "condvar.h"
#include "proc.hh"
extern void initidle(void);
extern void idleloop(void);
static volatile int bstate;
void
@@ -18,9 +21,10 @@ mpboot(void)
inittls();
initlapic();
initsamp();
initidle();
initnmi();
bstate = 1;
scheduler(); // start running processes
idleloop();
}
static void
@@ -90,6 +94,7 @@ cmain(u64 mbmagic, u64 mbaddr)
initlockstat();
initpci();
initnet();
initidle();
if (VERBOSE)
cprintf("ncpu %d %lu MHz\n", ncpu, cpuhz / 1000000);
@@ -107,6 +112,7 @@ cmain(u64 mbmagic, u64 mbaddr)
panic("TRAPFRAME_SIZE mismatch: %d %ld\n",
TRAPFRAME_SIZE, sizeof(trapframe));
scheduler();
idleloop();
panic("Unreachable");
}
@@ -20,7 +20,7 @@ proc_hash(const u32 &p)
return p;
}
int __mpalign__ idle[NCPU];
//int __mpalign__ idle[NCPU];
xns<u32, proc*, proc_hash> *xnspid __mpalign__;
static struct proc *bootproc __mpalign__;
@@ -30,84 +30,22 @@ struct kstack_tag kstack_tag[NCPU];
enum { sched_debug = 0 };
void
sched(void)
{
int intena;
#if SPINLOCK_DEBUG
if(!holding(&myproc()->lock))
panic("sched proc->lock");
#endif
if(mycpu()->ncli != 1)
panic("sched locks");
if(myproc()->state == RUNNING)
panic("sched running");
if(readrflags()&FL_IF)
panic("sched interruptible");
intena = mycpu()->intena;
myproc()->curcycles += rdtsc() - myproc()->tsc;
if (myproc()->state == ZOMBIE)
mtstop(myproc());
else
mtpause(myproc());
mtign();
swtch(&myproc()->context, mycpu()->scheduler);
mycpu()->intena = intena;
}
// Give up the CPU for one scheduling round.
void
yield(void)
{
acquire(&myproc()->lock); //DOC: yieldlock
myproc()->state = RUNNABLE;
set_proc_state(myproc(), RUNNABLE);
sched();
release(&myproc()->lock);
}
static int
migrate(struct proc *p)
{
// p should not be running, or be on a runqueue, or be myproc()
int c;
if (p == myproc())
panic("migrate: myproc");
for (c = 0; c < ncpu; c++) {
if (c == mycpu()->id)
continue;
if (idle[c]) { // OK if there is a race
acquire(&p->lock);
if (p->state == RUNNING)
panic("migrate: pid %u name %s is running",
p->pid, p->name);
if (p->cpu_pin)
panic("migrate: pid %u name %s is pinned",
p->pid, p->name);
p->curcycles = 0;
p->cpuid = c;
addrun(p);
idle[c] = 0;
release(&p->lock);
return 0;
}
}
return -1;
}
// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
static void
void
forkret(void)
{
// Still holding proc->lock from scheduler.
release(&myproc()->lock);
post_swtch();
// Just for the first process. can't do it earlier
// b/c file system code needs a process context
@@ -153,7 +91,7 @@ exit(void)
SLIST_FOREACH_SAFE(p, &(myproc()->childq), child_next, np) {
acquire(&p->lock);
p->parent = bootproc;
if(p->state == ZOMBIE)
if(get_proc_state(p) == ZOMBIE)
wakeupinit = 1;
SLIST_REMOVE(&(myproc()->childq), p, proc, child_next);
release(&p->lock);
@@ -172,7 +110,7 @@ exit(void)
cv_wakeup(&bootproc->cv);
// Jump into the scheduler, never to return.
myproc()->state = ZOMBIE;
set_proc_state(myproc(), ZOMBIE);
sched();
panic("zombie exit");
}
@@ -187,7 +125,7 @@ freeproc(struct proc *p)
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc*
struct proc*
allocproc(void)
{
struct proc *p;
@@ -276,104 +214,9 @@ inituser(void)
void
initproc(void)
{
int c;
xnspid = new xns<u32, proc*, proc_hash>(false);
if (xnspid == 0)
panic("pinit");
for (c = 0; c < NCPU; c++)
idle[c] = 1;
}
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
// - choose a process to run
// - swtch to start running that process
// - eventually that process transfers control
// via swtch back to the scheduler.
void
scheduler(void)
{
// allocate a fake PID for each scheduler thread
struct proc *schedp = allocproc();
if (!schedp)
panic("scheduler allocproc");
snprintf(schedp->name, sizeof(schedp->name), "scheduler_%u", cpunum());
mycpu()->proc = schedp;
myproc()->cpu_pin = 1;
// Test the work queue
//extern void testwq(void);
//testwq();
// Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
// mtrace_call_set(1, cpu->id);
mtstart(scheduler, schedp);
for(;;){
// Enable interrupts on this processor.
sti();
struct proc *p = schednext();
if (p) {
acquire(&p->lock);
if (p->state != RUNNABLE) {
release(&p->lock);
} else {
if (idle[mycpu()->id])
idle[mycpu()->id] = 0;
// Switch to chosen process. It is the process's job
// to release proc->lock and then reacquire it
// before jumping back to us.
mycpu()->proc = p;
switchuvm(p);
p->state = RUNNING;
p->tsc = rdtsc();
mtpause(schedp);
if (p->context->rip != (uptr)forkret &&
p->context->rip != (uptr)threadstub)
{
mtresume(p);
}
mtrec();
swtch(&mycpu()->scheduler, myproc()->context);
mtresume(schedp);
mtign();
switchkvm();
// Process is done running for now.
// It should have changed its p->state before coming back.
mycpu()->proc = schedp;
if (p->state != RUNNABLE)
delrun(p);
release(&p->lock);
}
} else {
if (steal()) {
if (idle[mycpu()->id])
idle[mycpu()->id] = 0;
} else {
if (!idle[mycpu()->id])
idle[mycpu()->id] = 1;
}
}
if (idle[mycpu()->id]) {
int worked;
do {
assert(mycpu()->ncli == 0);
worked = wq_trywork();
} while(worked);
sti();
}
}
}
// Grow/shrink current process's memory by n bytes.
@@ -456,7 +299,7 @@ kill(int pid)
}
acquire(&p->lock);
p->killed = 1;
if(p->state == SLEEPING){
if(get_proc_state(p) == SLEEPING){
// XXX
// we need to wake p up if it is cv_sleep()ing.
// can't change p from SLEEPING to RUNNABLE since that
@@ -479,7 +322,6 @@ void
procdumpall(void)
{
static const char *states[] = {
/* [UNUSED] = */ "unused",
/* [EMBRYO] = */ "embryo",
/* [SLEEPING] = */ "sleep ",
/* [RUNNABLE] = */ "runble",
@@ -491,8 +333,9 @@ procdumpall(void)
uptr pc[10];
for (proc *p : xnspid) {
if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
state = states[p->state];
if(get_proc_state(p) >= 0 && get_proc_state(p) < NELEM(states) &&
states[get_proc_state(p)])
state = states[get_proc_state(p)];
else
state = "???";
@@ -502,7 +345,7 @@ procdumpall(void)
cprintf("\n%-3d %-10s %8s %2u %lu\n",
p->pid, name, state, p->cpuid, p->tsc);
if(p->state == SLEEPING){
if(get_proc_state(p) == SLEEPING){
getcallerpcs((void*)p->context->rbp, pc, NELEM(pc));
for(int i=0; i<10 && pc[i] != 0; i++)
cprintf(" %lx\n", pc[i]);
@@ -531,7 +374,6 @@ fork(int flags)
if((np->vmap = myproc()->vmap->copy(cow)) == 0){
ksfree(slab_stack, np->kstack);
np->kstack = 0;
np->state = UNUSED;
if (!xnspid->remove(np->pid, &np))
panic("fork: ns_remove");
freeproc(np);
@@ -559,14 +401,11 @@ fork(int flags)
SLIST_INSERT_HEAD(&myproc()->childq, np, child_next);
release(&myproc()->lock);
if (migrate(np)) {
acquire(&np->lock);
np->cpuid = mycpu()->id;
addrun(np);
release(&np->lock);
}
acquire(&np->lock);
np->cpuid = mycpu()->id;
addrun(np);
release(&np->lock);
// cprintf("%d: fork done (pid %d)\n", myproc()->pid, pid);
return pid;
}
@@ -585,7 +424,7 @@ wait(void)
SLIST_FOREACH_SAFE(p, &myproc()->childq, child_next, np) {
havekids = 1;
acquire(&p->lock);
if(p->state == ZOMBIE){
if(get_proc_state(p) == ZOMBIE){
release(&p->lock); // noone else better be trying to lock p
pid = p->pid;
SLIST_REMOVE(&myproc()->childq, p, proc, child_next);
@@ -593,7 +432,6 @@ wait(void)
ksfree(slab_stack, p->kstack);
p->kstack = 0;
p->vmap->decref();
p->state = UNUSED;
if (!xnspid->remove(p->pid, &p))
panic("wait: ns_remove");
p->pid = 0;
@@ -622,7 +460,7 @@ wait(void)
void
threadhelper(void (*fn)(void *), void *arg)
{
release(&myproc()->lock); // initially held by scheduler
post_swtch();
mtstart(fn, myproc());
fn(arg);
exit();
@@ -664,7 +502,6 @@ threadpin(void (*fn)(void*), void *arg, const char *name, int cpu)
p->cpuid = cpu;
p->cpu_pin = 1;
acquire(&p->lock);
p->state = RUNNABLE;
addrun(p);
release(&p->lock);
}
@@ -24,12 +24,82 @@ struct runq {
static struct runq runq[NCPU] __mpalign__;
void
post_swtch(void)
{
if (get_proc_state(mycpu()->prev) == RUNNABLE &&
mycpu()->prev != idlep[mycpu()->id])
addrun(mycpu()->prev);
release(&mycpu()->prev->lock);
}
void
sched(void)
{
extern void threadstub(void);
extern void forkret(void);
int intena;
#if SPINLOCK_DEBUG
if(!holding(&myproc()->lock))
panic("sched proc->lock");
#endif
if(mycpu()->ncli != 1)
panic("sched locks");
if(get_proc_state(myproc()) == RUNNING)
panic("sched running");
if(readrflags()&FL_IF)
panic("sched interruptible");
intena = mycpu()->intena;
myproc()->curcycles += rdtsc() - myproc()->tsc;
if (get_proc_state(myproc()) == ZOMBIE)
mtstop(myproc());
else
mtpause(myproc());
mtign();
struct proc *next = schednext();
if (next == nullptr) {
if (get_proc_state(myproc()) != RUNNABLE) {
next = idlep[mycpu()->id];
} else {
set_proc_state(myproc(), RUNNING);
mycpu()->intena = intena;
release(&myproc()->lock);
return;
}
}
if (get_proc_state(next) != RUNNABLE)
panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
struct proc *prev = myproc();
mycpu()->proc = next;
mycpu()->prev = prev;
switchvm(next);
set_proc_state(next, RUNNING);
next->tsc = rdtsc();
mtpause(next);
if (next->context->rip != (uptr)forkret &&
next->context->rip != (uptr)threadstub)
{
mtresume(next);
}
mtrec();
swtch(&prev->context, next->context);
mycpu()->intena = intena;
post_swtch();
}
void
addrun(struct proc *p)
{
// Always called with p->lock held
struct runq *q;
p->state = RUNNABLE;
set_proc_state(p, RUNNABLE);
q = &runq[p->cpuid];
acquire(&q->lock);
@@ -71,9 +141,10 @@ steal(void)
if (tryacquire(&q->lock) == 0)
continue;
STAILQ_FOREACH(p, &q->q, runqlink) {
if (p->state == RUNNABLE && !p->cpu_pin &&
if (get_proc_state(p) == RUNNABLE && !p->cpu_pin &&
p->curcycles != 0 && p->curcycles > VICTIMAGE)
{
STAILQ_REMOVE(&q->q, p, proc, runqlink);
steal = p;
break;
}
@@ -82,10 +153,9 @@ steal(void)
if (steal) {
acquire(&steal->lock);
if (steal->state == RUNNABLE && !steal->cpu_pin &&
if (get_proc_state(steal) == RUNNABLE && !steal->cpu_pin &&
steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
{
delrun(steal);
steal->curcycles = 0;
steal->cpuid = mycpu()->id;
addrun(steal);
@@ -93,6 +163,8 @@ steal(void)
r = 1;
break;
}
if (get_proc_state(steal) == RUNNABLE)
addrun(steal);
release(&steal->lock);
}
}
@@ -112,10 +184,8 @@ schednext(void)
q = &runq[mycpu()->id];
acquire(&q->lock);
p = STAILQ_LAST(&q->q, proc, runqlink);
if (p) {
if (p)
STAILQ_REMOVE(&q->q, p, proc, runqlink);
STAILQ_INSERT_HEAD(&q->q, p, runqlink);
}
release(&q->lock);
popcli();
return p;
@@ -127,7 +197,43 @@ initsched(void)
int i;
for (i = 0; i < NCPU; i++) {
initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
STAILQ_INIT(&runq[i].q);
}
}
#if 0
static int
migrate(struct proc *p)
{
// p should not be running, or be on a runqueue, or be myproc()
int c;
if (p == myproc())
panic("migrate: myproc");
for (c = 0; c < ncpu; c++) {
if (c == mycpu()->id)
continue;
if (idle[c]) { // OK if there is a race
acquire(&p->lock);
if (p->state == RUNNING)
panic("migrate: pid %u name %s is running",
p->pid, p->name);
if (p->cpu_pin)
panic("migrate: pid %u name %s is pinned",
p->pid, p->name);
p->curcycles = 0;
p->cpuid = c;
addrun(p);
idle[c] = 0;
release(&p->lock);
return 0;
}
}
return -1;
}
#endif
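With scheduler() gone, sched() above now picks the next process itself and keeps the outgoing process's lock held across swtch(); whatever code runs next on the CPU — the tail of sched(), forkret(), or threadhelper() — calls post_swtch(), which re-queues mycpu()->prev if it is still RUNNABLE and then releases its lock. Below is a single-threaded sketch of that hand-off, with a bool standing in for the spinlock and a plain function call standing in for swtch(); it is an illustration of the contract, not the kernel code:

#include <cassert>
#include <cstdio>

enum procstate { EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };

struct proc {
  const char *name;
  enum procstate _state;
  bool locked;          // stand-in for the kernel's proc->lock spinlock
};

static struct proc *cur, *prev;

// Runs on the incoming side of every switch (sched()'s tail, forkret,
// threadhelper): requeue prev if still runnable, then drop its lock.
static void post_swtch(void)
{
  if (prev->_state == RUNNABLE)
    std::printf("addrun(%s)\n", prev->name);   // addrun() in the kernel
  assert(prev->locked);
  prev->locked = false;
  std::printf("%s released %s->lock\n", cur->name, prev->name);
}

// Outgoing side: the caller must hold its own lock; the lock stays held
// across the "switch" and is released by post_swtch() on the other side.
static void switch_to(struct proc *next)
{
  assert(cur->locked);
  prev = cur;
  cur = next;
  cur->_state = RUNNING;
  post_swtch();          // in the kernel this runs after swtch() returns
}

int main()
{
  struct proc a = { "A", RUNNING, false };
  struct proc b = { "B", RUNNABLE, false };
  cur = &a;
  a.locked = true;       // e.g. cv_sleepto() acquired A's lock
  a._state = SLEEPING;   // and marked A SLEEPING before calling sched()
  switch_to(&b);         // B runs next; it releases A's lock
  return 0;
}

This is why cv_sleepto() above no longer releases myproc()->lock after sched() returns: by the time the sleeper runs again, its lock was already dropped by whichever context switched away from it.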
@@ -179,7 +179,7 @@ trap(struct trapframe *tf)
// Force process to give up CPU on clock tick.
// If interrupts were on while locks held, would need to check nlock.
if(myproc() && myproc()->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
if(myproc() && get_proc_state(myproc()) == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
yield();
// Check if the process has been killed since we yielded
@@ -220,7 +220,7 @@ sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
safestrcpy(p->name, name, sizeof(p->name));
acquire(&p->lock);
p->state = RUNNABLE;
set_proc_state(p, RUNNABLE);
addrun(p);
release(&p->lock);