Commit a02648fc authored by Silas Boyd-Wickizer

Move scheduling stuff to sched.c.

Parent d3c7235d
Makefile
@@ -46,6 +46,7 @@ OBJS = \
 	pipe.o \
 	proc.o \
 	rcu.o \
+	sched.o \
 	spinlock.o \
 	swtch.o \
 	string.o \
...
proc.c
@@ -11,10 +11,10 @@
 #include "bits.h"
 #include "kmtrace.h"
 #include "vm.h"
+#include "sched.h"
 
 int __mpalign__ idle[NCPU];
 struct ns *nspid __mpalign__;
-struct ns *nsrunq __mpalign__;
 static struct proc *bootproc __mpalign__;
 
 #if MTRACE
@@ -60,42 +60,6 @@ yield(void)
   release(&myproc()->lock);
 }
 
-// Mark a process RUNNABLE and add it to the runq
-// of its cpu. Caller must hold p->lock so that
-// some other core doesn't start running the
-// process before the caller has finished setting
-// the process up, and to cope with racing callers
-// e.g. two wakeups on same process. and to
-// allow atomic addrun(); sched();
-void
-addrun(struct proc *p)
-{
-#if SPINLOCK_DEBUG
-  if(!holding(&p->lock))
-    panic("addrun no p->lock");
-#endif
-  if (p->on_runq >= 0)
-    panic("addrun on runq already");
-  ns_insert(nsrunq, KI(p->cpuid), p);
-  p->on_runq = p->cpuid;
-}
-
-void
-delrun(struct proc *p)
-{
-#if SPINLOCK_DEBUG
-  if(!holding(&p->lock))
-    panic("delrun no p->lock");
-#endif
-  if (p->on_runq < 0)
-    panic("delrun not on runq");
-  if (ns_remove(nsrunq, KI(p->on_runq), p) == 0)
-    panic("delrun: ns_remove");
-  p->on_runq = -1;
-}
-
 void
 migrate(struct proc *p)
 {
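The deleted comment states the protocol addrun() relies on: the caller, not addrun() itself, performs the RUNNABLE transition while holding p->lock. A minimal sketch of a conforming caller, assuming a wakeup path in the style of xv6 (wakeup1() and the SLEEPING check are illustrative, not part of this commit):

    // Hypothetical caller illustrating the addrun() locking protocol.
    static void
    wakeup1(struct proc *p)
    {
      acquire(&p->lock);           // block other cores and racing wakeups
      if (p->state == SLEEPING) {  // a second wakeup finds p RUNNABLE and skips
        p->state = RUNNABLE;
        addrun(p);                 // publish on the runq while still locked
      }
      release(&p->lock);           // only now may another core run p
    }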
@@ -325,9 +289,7 @@ initproc(void)
   if (nspid == 0)
     panic("pinit");
 
-  nsrunq = nsalloc(1);
-  if (nsrunq == 0)
-    panic("pinit runq");
+  initsched();
 
   for (c = 0; c < NCPU; c++)
     idle[c] = 1;
@@ -341,49 +303,6 @@ initproc(void)
 // - eventually that process transfers control
 //   via swtch back to the scheduler.
 
-static void *
-choose_runnable(void *pp, void *arg)
-{
-  struct proc *p = pp;
-  if (p->state == RUNNABLE)
-    return p;
-  return 0;
-}
-
-static void *
-steal_cb(void *vk, void *v, void *arg)
-{
-  struct proc *p = v;
-
-  acquire(&p->lock);
-  if (p->state != RUNNABLE || p->cpuid == mycpu()->id || p->cpu_pin) {
-    release(&p->lock);
-    return 0;
-  }
-
-  if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
-    if (sched_debug)
-      cprintf("cpu%d: steal %d (cycles=%d) from %d\n",
-              mycpu()->id, p->pid, (int)p->curcycles, p->cpuid);
-    delrun(p);
-    p->curcycles = 0;
-    p->cpuid = mycpu()->id;
-    addrun(p);
-    release(&p->lock);
-    return p;
-  }
-  release(&p->lock);
-  return 0;
-}
-
-int
-steal(void)
-{
-  void *stole = ns_enumerate(nsrunq, steal_cb, 0);
-  return stole ? 1 : 0;
-}
-
 void
 scheduler(void)
 {
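steal() reports whether steal_cb() migrated a process onto the local runq, so an idle CPU can immediately retry its own queue. A hedged sketch of that fallback, assuming an idle loop shaped like the scheduler() below (this loop is not part of the diff):

    // Sketch: local runq first, work stealing as the fallback.
    for (;;) {
      struct proc *p = schednext();  // cheapest: our own runq
      if (p == 0 && steal())         // nothing local: try to migrate work here
        continue;                    // a stolen proc is now on our runq
      // ... otherwise run p, as scheduler() does below ...
    }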
@@ -403,7 +322,7 @@ scheduler(void)
     // Enable interrupts on this processor.
     sti();
 
-    struct proc *p = ns_enumerate_key(nsrunq, KI(mycpu()->id), choose_runnable, 0);
+    struct proc *p = schednext();
     if (p) {
       acquire(&p->lock);
       if (p->state != RUNNABLE) {
@@ -595,7 +514,7 @@ void *procdump(void *vk, void *v, void *arg)
     state = states[p->state];
   else
     state = "???";
-  cprintf("%d %s %s %d, ", p->pid, state, p->name, p->cpuid);
+  cprintf("%d %s %s %d %lu", p->pid, state, p->name, p->cpuid, p->tsc);
 
   // XXX(sbw)
 #if 0
...
#include "types.h"
#include "kernel.h"
#include "param.h"
#include "mmu.h"
#include "amd64.h"
#include "spinlock.h"
#include "condvar.h"
#include "queue.h"
#include "proc.h"
#include "cpu.h"
#include "bits.h"
#include "kmtrace.h"
#include "vm.h"
#include "sched.h"
enum { sched_debug = 0 };
struct ns *nsrunq __mpalign__;
// Mark a process RUNNABLE and add it to the runq
// of its cpu.  Caller must hold p->lock so that
// some other core doesn't start running the
// process before the caller has finished setting
// the process up, to cope with racing callers
// (e.g. two wakeups on the same process), and to
// allow an atomic addrun(); sched() sequence.
void
addrun(struct proc *p)
{
#if SPINLOCK_DEBUG
  if(!holding(&p->lock))
    panic("addrun no p->lock");
#endif
  if (p->on_runq >= 0)
    panic("addrun on runq already");
  ns_insert(nsrunq, KI(p->cpuid), p);
  p->on_runq = p->cpuid;
}

void
delrun(struct proc *p)
{
#if SPINLOCK_DEBUG
  if(!holding(&p->lock))
    panic("delrun no p->lock");
#endif
  if (p->on_runq < 0)
    panic("delrun not on runq");
  if (ns_remove(nsrunq, KI(p->on_runq), p) == 0)
    panic("delrun: ns_remove");
  p->on_runq = -1;
}

static void *
steal_cb(void *vk, void *v, void *arg)
{
  struct proc *p = v;

  acquire(&p->lock);
  if (p->state != RUNNABLE || p->cpuid == mycpu()->id || p->cpu_pin) {
    release(&p->lock);
    return 0;
  }

  if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
    if (sched_debug)
      cprintf("cpu%d: steal %d (cycles=%d) from %d\n",
              mycpu()->id, p->pid, (int)p->curcycles, p->cpuid);
    delrun(p);
    p->curcycles = 0;
    p->cpuid = mycpu()->id;
    addrun(p);
    release(&p->lock);
    return p;
  }
  release(&p->lock);
  return 0;
}

int
steal(void)
{
  void *stole = ns_enumerate(nsrunq, steal_cb, 0);
  return stole ? 1 : 0;
}

static void *
choose_runnable(void *pp, void *arg)
{
  struct proc *head = pp;
  if (head->state == RUNNABLE)
    return head;
  return 0;
}

struct proc *
schednext(void)
{
  return ns_enumerate_key(nsrunq, KI(mycpu()->id), choose_runnable, 0);
}

void
initsched(void)
{
  nsrunq = nsalloc(1);
  if (nsrunq == 0)
    panic("pinit runq");
}
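Taken together, the three ns calls show the design: a single shared name space keyed by cpuid doubles as NCPU logical runqs, and steal_cb()'s curcycles test migrates a process only if it has not run yet or has run long enough that the migration cost is amortized. The summary below is an inference from the calls above, not documentation of the ns API:

    ns_insert(nsrunq, KI(p->cpuid), p);                             // addrun: file under the owning cpu's key
    ns_enumerate_key(nsrunq, KI(mycpu()->id), choose_runnable, 0);  // schednext: scan the local key only
    ns_enumerate(nsrunq, steal_cb, 0);                              // steal: scan every key, i.e. every cpu's runq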
sched.h (new file)

void delrun(struct proc*);
void initsched(void);
struct proc* schednext(void);
int steal(void);
void addrun(struct proc*);
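One ordering constraint follows from the initproc() hunk above: initsched() must run before the first addrun(), because addrun() dereferences nsrunq. A sketch of the implied boot order (the bootproc use is an assumption for illustration only):

    initsched();          // allocate nsrunq; panics on failure
    ...
    acquire(&p->lock);    // p: first runnable process, e.g. bootproc
    addrun(p);            // safe now that nsrunq exists
    release(&p->lock);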