Try to separate the process scheduler from other scheduling-related code

Parent 7c717316
@@ -189,6 +189,8 @@ void addrun(struct proc *);
 void sched(void);
 void post_swtch(void);
 void scheddump(void);
+int steal(void);
+void addrun(struct proc*);
 
 // spinlock.c
 void acquire(struct spinlock*);
...
@@ -6,6 +6,7 @@
 #include "fs.h"
 #include "file.hh"
 #include "filetable.hh"
+#include "sched.hh"
 
 class uwq;
 class uwq_worker;
@@ -44,7 +45,7 @@ typedef enum procstate {
 } procstate_t;;
 
 // Per-process state
-struct proc : public rcu_freed {
+struct proc : public rcu_freed, public sched_link {
   struct vmap *vmap;        // va -> vma
   uwq* uwq;
   uwq_worker* worker;
@@ -70,9 +71,6 @@ struct proc : public rcu_freed {
 #if MTRACE
   struct mtrace_stacks mtrace_stacks;
 #endif
-  struct runq *runq;
-  STAILQ_ENTRY(proc) runqlink;
-
   struct condvar *oncv;        // Where it is sleeping, for kill()
   u64 cv_wakeup;               // Wakeup time for this process
   LIST_ENTRY(proc) cv_waiters; // Linked list of processes waiting for oncv
...
-void delrun(struct proc*);
-struct proc* schednext(void);
-int steal(void);
-void addrun(struct proc*);
+struct sched_link
+{
+  sched_link* prev;
+  sched_link* next;
+};
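The new sched_link base makes proc an intrusive list node: the scheduler links processes through fields embedded in the process itself, so enqueueing never allocates. Below is a minimal, standalone sketch of that pattern (a circular list with a sentinel head, mirroring the enq/deq logic added to sched.cc); the node and list names are illustrative, not from the tree.

// Standalone sketch (illustrative names, not kernel code): an intrusive,
// circular, doubly-linked list with a sentinel head, the pattern that
// sched_link enables for proc.
#include <cassert>

struct sched_link {
  sched_link* prev;
  sched_link* next;
};

struct node : sched_link {        // stands in for struct proc
  int id;
  explicit node(int i) : id(i) { prev = next = nullptr; }
};

struct list {
  sched_link head;                // sentinel; list is empty when head links to itself
  list() { head.prev = head.next = &head; }

  void enq(sched_link* e) {       // add at tail, as schedule::enq does
    e->next = &head;
    e->prev = head.prev;
    head.prev->next = e;
    head.prev = e;
  }

  sched_link* deq() {             // remove from head, as schedule::deq does
    sched_link* e = head.next;
    if (e == &head)
      return nullptr;
    e->next->prev = e->prev;
    e->prev->next = e->next;
    return e;
  }
};

int main() {
  list l;
  node a(1), b(2);
  l.enq(&a);
  l.enq(&b);
  assert(static_cast<node*>(l.deq())->id == 1);   // FIFO order
  assert(static_cast<node*>(l.deq())->id == 2);
  assert(l.deq() == nullptr);
  return 0;
}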
@@ -271,6 +271,9 @@ consoleintr(int (*getc)(void))
   case C('W'):
     wq_dump();
     break;
+  case C('S'):
+    scheddump();
+    break;
   case C('F'):  // kmem stats
     kmemprint();
     break;
...
@@ -5,7 +5,6 @@
 #include "condvar.h"
 #include "proc.hh"
 #include "cpu.hh"
-#include "sched.hh"
 #include "percpu.hh"
 #include "wq.hh"
 #include "uwq.hh"
...
@@ -9,7 +9,6 @@
 #include "cpu.hh"
 #include "bits.hh"
 #include "kmtrace.hh"
-#include "sched.hh"
 #include "kalloc.hh"
 #include "vm.hh"
 #include "ns.hh"
@@ -41,7 +40,7 @@ proc::proc(int npid) :
   rcu_freed("proc"), vmap(0), uwq(0), worker(0), kstack(0),
   pid(npid), parent(0), tf(0), context(0), killed(0),
   ftable(0), cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
-  cpu_pin(0), runq(0), oncv(0), cv_wakeup(0),
+  cpu_pin(0), oncv(0), cv_wakeup(0),
   user_fs_(0), unmap_tlbreq_(0), in_exec_(0), uaccess_(0),
   exception_inuse(0), state_(EMBRYO)
 {
@@ -51,7 +50,6 @@ proc::proc(int npid) :
   memset(&childq, 0, sizeof(childq));
   memset(&child_next, 0, sizeof(child_next));
-  memset(&runqlink, 0, sizeof(runqlink));
   memset(&cv_waiters, 0, sizeof(cv_waiters));
   memset(&cv_sleep, 0, sizeof(cv_sleep));
   memset(__cxa_eh_global, 0, sizeof(__cxa_eh_global));
...
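In the sched.cc rewrite below, schedule::enq and schedule::deq protect the per-CPU queue with a scoped_acquire guard rather than paired acquire/release calls, so the lock is released on every exit path. A minimal sketch of that RAII idiom, assuming only that scoped_acquire acts as a guard over the kernel spinlock (the spinlock here is a userspace stand-in, not the kernel's):

// Standalone sketch of the scoped-lock pattern used by schedule::enq()/deq().
#include <atomic>

struct spinlock_sketch {                       // stand-in for the kernel spinlock
  std::atomic<bool> held{false};
  void acquire() { while (held.exchange(true, std::memory_order_acquire)) { } }
  void release() { held.store(false, std::memory_order_release); }
};

class scoped_acquire_sketch {                  // stand-in for scoped_acquire
 public:
  explicit scoped_acquire_sketch(spinlock_sketch* l) : l_(l) { l_->acquire(); }
  ~scoped_acquire_sketch() { l_->release(); }
  scoped_acquire_sketch(const scoped_acquire_sketch&) = delete;
  scoped_acquire_sketch& operator=(const scoped_acquire_sketch&) = delete;
 private:
  spinlock_sketch* l_;
};

static spinlock_sketch qlock;
static int qlen = 0;

int try_remove() {
  scoped_acquire_sketch x(&qlock);             // lock held for the rest of the scope
  if (qlen == 0)
    return -1;                                 // early return still releases the lock
  qlen--;
  return 0;                                    // and so does the normal return
}

int main() {
  return try_remove() == -1 ? 0 : 1;
}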
@@ -9,21 +9,145 @@
 #include "cpu.hh"
 #include "bits.hh"
 #include "kmtrace.hh"
-#include "sched.hh"
 #include "vm.hh"
 #include "wq.hh"
+#include "percpu.hh"
+#include "sperf.hh"
 
 enum { sched_debug = 0 };
 enum { steal_nonexec = 1 };
 
-struct runq {
-  STAILQ_HEAD(queue, proc) q;
-  struct spinlock lock;
-  volatile u64 len __mpalign__;
-  __padout__;
+class schedule
+{
+public:
+  schedule();
+  void enq(proc* entry);
+  proc* deq();
+  proc* steal(bool nonexec);
+  void dump();
+
+  static void* operator new(unsigned long nbytes, schedule* buf) {
+    assert(nbytes == sizeof(schedule));
+    return buf;
+  }
+
+private:
+  struct spinlock lock_;
+  sched_link head_;
+
+  void sanity(void);
+
+  struct {
+    std::atomic<u64> enqs;
+    std::atomic<u64> deqs;
+    std::atomic<u64> steals;
+  } stats_;
+
+  u64 ncansteal_ __mpalign__;
 };
+percpu<schedule> schedule_;
 
-static struct runq runq[NCPU] __mpalign__;
+static bool cansteal(proc* p, bool nonexec);
+
+schedule::schedule(void)
+{
+  initlock(&lock_, "schedule::lock_", LOCKSTAT_SCHED);
+  head_.next = &head_;
+  head_.prev = &head_;
+  ncansteal_ = 0;
+  stats_.enqs = 0;
+  stats_.deqs = 0;
+  stats_.steals = 0;
+}
+
+void
+schedule::enq(proc* p)
+{
+  sched_link* entry = p;
+  // Add to tail
+  scoped_acquire x(&lock_);
+  entry->next = &head_;
+  entry->prev = head_.prev;
+  head_.prev->next = entry;
+  head_.prev = entry;
+  if (cansteal((proc*)entry, true))
+    ncansteal_++;
+  sanity();
+  stats_.enqs++;
+}
+
+proc*
+schedule::deq(void)
+{
+  // Remove from head
+  scoped_acquire x(&lock_);
+  sched_link* entry = head_.next;
+  if (entry == &head_)
+    return nullptr;
+  entry->next->prev = entry->prev;
+  entry->prev->next = entry->next;
+  if (cansteal((proc*)entry, true))
+    ncansteal_--;
+  sanity();
+  stats_.deqs++;
+  return (proc*)entry;
+}
+
+proc*
+schedule::steal(bool nonexec)
+{
+  if (ncansteal_ == 0 || !tryacquire(&lock_))
+    return nullptr;
+  for (sched_link* ptr = head_.next; ptr != &head_; ptr = ptr->next)
+    if (cansteal((proc*)ptr, nonexec)) {
+      ptr->next->prev = ptr->prev;
+      ptr->prev->next = ptr->next;
+      ncansteal_--;
+      sanity();
+      stats_.steals++;
+      release(&lock_);
+      return (proc*)ptr;
+    }
+  release(&lock_);
+  return nullptr;
+}
+
+void
+schedule::dump(void)
+{
+  cprintf("%lu %lu %lu\n",
+          stats_.enqs.load(),
+          stats_.deqs.load(),
+          stats_.steals.load());
+
+  stats_.enqs = 0;
+  stats_.deqs = 0;
+  stats_.steals = 0;
+}
+
+void
+schedule::sanity(void)
+{
+#if DEBUG
+  u64 n = 0;
+
+  for (sched_link* ptr = head_.next; ptr != &head_; ptr = ptr->next)
+    if (cansteal((proc*)ptr, true))
+      n++;
+
+  if (n != ncansteal_)
+    panic("schedule::sanity: %lu != %lu", n, ncansteal_);
+#endif
+}
+
+static bool
+cansteal(proc* p, bool nonexec)
+{
+  return (p->get_state() == RUNNABLE && !p->cpu_pin &&
+          (p->in_exec_ || nonexec) &&
+          p->curcycles != 0 && p->curcycles > VICTIMAGE);
+}
 
 void
 post_swtch(void)
@@ -35,14 +159,67 @@ post_swtch(void)
   wqcrit_trywork();
 }
+
+int
+steal(void)
+{
+  struct proc *steal;
+  int r = 0;
+
+  pushcli();
+  for (int nonexec = 0; nonexec < (steal_nonexec ? 2 : 1); nonexec++) {
+    for (int i = 1; i < ncpu; i++) {
+      steal = schedule_[i].steal(nonexec);
+      if (steal != nullptr) {
+        acquire(&steal->lock);
+        if (steal->get_state() == RUNNABLE && !steal->cpu_pin &&
+            steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
+        {
+          steal->curcycles = 0;
+          steal->cpuid = mycpu()->id;
+          addrun(steal);
+          release(&steal->lock);
+          r = 1;
+          goto found;
+        }
+        if (steal->get_state() == RUNNABLE) {
+          addrun(steal);
+        }
+        release(&steal->lock);
+      }
+    }
+  }
+
+ found:
+  popcli();
+  return r;
+}
+
+void
+scheddump(void)
+{
+  for (int i = 0; i < NCPU; i++) {
+    cprintf("%u ", i);
+    schedule_[i].dump();
+  }
+}
+
+void
+addrun(struct proc* p)
+{
+  p->set_state(RUNNABLE);
+  schedule_[p->cpuid].enq(p);
+}
 
 void
 sched(void)
 {
   extern void threadstub(void);
   extern void forkret(void);
   extern void idleheir(void *x);
   int intena;
+  proc* prev;
+  proc* next;
 
 #if SPINLOCK_DEBUG
   if(!holding(&myproc()->lock))
@@ -53,6 +230,7 @@ sched(void)
     extern void idlebequeath(void);
     idlebequeath();
   }
   if(mycpu()->ncli != 1)
     panic("sched locks");
   if(myproc()->get_state() == RUNNING)
@@ -62,7 +240,9 @@ sched(void)
   intena = mycpu()->intena;
   myproc()->curcycles += rdtsc() - myproc()->tsc;
 
-  struct proc *next = schednext();
+  // Interrupts are disabled
+  next = schedule_->deq();
   if (next == nullptr) {
     if (myproc()->get_state() != RUNNABLE ||
         // proc changed its CPU pin?
@@ -79,7 +259,7 @@ sched(void)
   if (next->get_state() != RUNNABLE)
     panic("non-RUNNABLE next %s %u", next->name, next->get_state());
 
-  struct proc *prev = myproc();
+  prev = myproc();
   mycpu()->proc = next;
   mycpu()->prev = prev;
@@ -107,158 +287,8 @@ }
 }
 
 void
-addrun(struct proc *p)
-{
-  // Always called with p->lock held
-  struct runq *q;
-
-  p->set_state(RUNNABLE);
-
-  q = &runq[p->cpuid];
-  acquire(&q->lock);
-  STAILQ_INSERT_HEAD(&q->q, p, runqlink);
-  p->runq = q;
-  q->len++;
-  release(&q->lock);
-}
-
-void
-delrun(struct proc *p)
-{
-  // Always called with p->lock held
-  struct runq *q;
-
-  q = p->runq;
-  acquire(&q->lock);
-  STAILQ_REMOVE(&q->q, p, proc, runqlink);
-  q->len--;
-  release(&q->lock);
-}
-
-int
-steal(void)
-{
-  struct proc *steal;
-  int r = 0;
-
-  pushcli();
-  for (int nonexec = 0; nonexec < (steal_nonexec ? 2 : 1); nonexec++) {
-    for (int i = 1; i < ncpu; i++) {
-      struct runq *q = &runq[(i+mycpu()->id) % ncpu];
-      struct proc *p;
-
-      if (q->len == 0)
-        continue;
-
-      // XXX(sbw) Look for a process to steal.  Acquiring q->lock
-      // then p->lock can result in deadlock.  So we acquire
-      // q->lock, scan for a process, drop q->lock, acquire p->lock,
-      // and then check that it's still ok to steal p.
-      steal = nullptr;
-      if (tryacquire(&q->lock) == 0)
-        continue;
-      STAILQ_FOREACH(p, &q->q, runqlink) {
-        if (p->get_state() == RUNNABLE && !p->cpu_pin &&
-            (p->in_exec_ || nonexec) &&
-            p->curcycles != 0 && p->curcycles > VICTIMAGE)
-        {
-          STAILQ_REMOVE(&q->q, p, proc, runqlink);
-          q->len--;
-          steal = p;
-          break;
-        }
-      }
-      release(&q->lock);
-
-      if (steal) {
-        acquire(&steal->lock);
-        if (steal->get_state() == RUNNABLE && !steal->cpu_pin &&
-            steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
-        {
-          steal->curcycles = 0;
-          steal->cpuid = mycpu()->id;
-          addrun(steal);
-          release(&steal->lock);
-          r = 1;
-          goto found;
-        }
-        if (steal->get_state() == RUNNABLE)
-          addrun(steal);
-        release(&steal->lock);
-      }
-    }
-  }
-
- found:
-  popcli();
-  return r;
-}
-
-struct proc *
-schednext(void)
-{
-  // No locks, interrupts enabled
-  struct runq *q;
-  struct proc *p = nullptr;
-
-  pushcli();
-  q = &runq[mycpu()->id];
-  acquire(&q->lock);
-  p = STAILQ_LAST(&q->q, proc, runqlink);
-  if (p) {
-    STAILQ_REMOVE(&q->q, p, proc, runqlink);
-    q->len--;
-  }
-  release(&q->lock);
-  popcli();
-  return p;
-}
-
-void
 initsched(void)
 {
-  int i;
-
-  for (i = 0; i < NCPU; i++) {
-    initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
-    STAILQ_INIT(&runq[i].q);
-    runq[i].len = 0;
-  }
+  for (int i = 0; i < NCPU; i++)
+    new (&schedule_[i]) schedule();
 }
-
-#if 0
-static int
-migrate(struct proc *p)
-{
-  // p should not be running, or be on a runqueue, or be myproc()
-  int c;
-
-  if (p == myproc())
-    panic("migrate: myproc");
-
-  for (c = 0; c < ncpu; c++) {
-    if (c == mycpu()->id)
-      continue;
-    if (idle[c]) {    // OK if there is a race
-      acquire(&p->lock);
-      if (p->state == RUNNING)
-        panic("migrate: pid %u name %s is running",
-              p->pid, p->name);
-      if (p->cpu_pin)
-        panic("migrate: pid %u name %s is pinned",
-              p->pid, p->name);
-      p->curcycles = 0;
-      p->cpuid = c;
-      addrun(p);
-      idle[c] = 0;
-      release(&p->lock);
-      return 0;
-    }
-  }
-  return -1;
-}
-#endif
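One detail worth noting in the new initsched above: schedule_ is percpu<schedule> storage, so the per-CPU scheduler objects are constructed in place with the class's placement operator new rather than heap-allocated. A standalone sketch of that pattern, with a stub class and a plain static buffer as stand-ins (NCPU_STUB and sched_stub are illustrative names, not from the kernel):

// Standalone sketch: constructing objects in preallocated per-CPU storage
// with a class-level placement operator new, as initsched() does with
// new (&schedule_[i]) schedule().
#include <cassert>
#include <cstddef>
#include <cstdio>

struct sched_stub {                          // stands in for class schedule
  unsigned long enqs;
  sched_stub() : enqs(0) { }
  // The storage already exists, so "allocation" just hands back the
  // caller-supplied buffer (same shape as schedule::operator new).
  static void* operator new(std::size_t nbytes, sched_stub* buf) {
    assert(nbytes == sizeof(sched_stub));
    return buf;
  }
};

const int NCPU_STUB = 4;                     // illustrative, not the kernel's NCPU
alignas(sched_stub) static char slots[NCPU_STUB][sizeof(sched_stub)];

int main() {
  // Run the constructor in place, one object per "CPU" slot.
  for (int i = 0; i < NCPU_STUB; i++)
    new (reinterpret_cast<sched_stub*>(slots[i])) sched_stub();

  std::printf("%lu\n", reinterpret_cast<sched_stub*>(slots[0])->enqs);  // prints 0
  return 0;
}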