Clean up proc: encapsulate the process state field behind proc::set_state()/get_state(), move the constructor/destructor out of line, and bump PROC_KSTACK_OFFSET from 40 to 48.

上级 0b5352c5
......@@ -130,5 +130,5 @@ struct taskstate
(u32) ((u64)(rip)>>32), 0, \
}
#define PROC_KSTACK_OFFSET 40
#define PROC_KSTACK_OFFSET 48
#define TRAPFRAME_SIZE 192
......@@ -41,7 +41,6 @@ struct proc : public rcu_freed {
struct vmap *vmap; // va -> vma
uptr brk; // Top of heap
char *kstack; // Bottom of kernel stack for this process
enum procstate _state; // Process state
volatile int pid; // Process ID
struct proc *parent; // Parent process
struct trapframe *tf; // Trap frame for current syscall
......@@ -73,61 +72,15 @@ struct proc : public rcu_freed {
LIST_ENTRY(proc) cv_waiters; // Linked list of processes waiting for oncv
LIST_ENTRY(proc) cv_sleep; // Linked list of processes sleeping on a cv
proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
_state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
{
snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
initlock(&lock, lockname+3, LOCKSTAT_PROC);
initcondvar(&cv, lockname);
proc(int npid);
~proc(void);
memset(&childq, 0, sizeof(childq));
memset(&child_next, 0, sizeof(child_next));
memset(ofile, 0, sizeof(ofile));
memset(&runqlink, 0, sizeof(runqlink));
memset(&cv_waiters, 0, sizeof(cv_waiters));
memset(&cv_sleep, 0, sizeof(cv_sleep));
}
~proc() {
destroylock(&lock);
destroycondvar(&cv);
}
virtual void do_gc() { delete this; }
virtual void do_gc(void) { delete this; }
NEW_DELETE_OPS(proc)
};
// Validate and perform a process state transition for p.
// Legal lifecycle (panics on any other transition):
//   EMBRYO   -> RUNNABLE
//   SLEEPING -> RUNNABLE
//   RUNNABLE -> RUNNABLE | RUNNING
//   RUNNING  -> RUNNABLE | SLEEPING | ZOMBIE
//   ZOMBIE   -> (terminal; any transition panics)
// NOTE(review): callers visible in this diff hold p->lock around the
// transition — presumably that is the required locking contract; confirm.
static inline void
set_proc_state(struct proc *p, enum procstate s)
{
switch(p->_state) {
case EMBRYO:
// A newly created process may only be handed to the scheduler.
if (s != RUNNABLE)
panic("EMBRYO -> %u", s);
break;
case SLEEPING:
// A sleeper is woken by making it RUNNABLE; it may not run directly.
if (s != RUNNABLE)
panic("SLEEPING -> %u", s);
break;
case RUNNABLE:
// RUNNABLE -> RUNNABLE is allowed (e.g. re-queueing on another runq).
if (s != RUNNING && s != RUNNABLE)
panic("RUNNABLE -> %u", s);
break;
case RUNNING:
// A running process can yield, sleep, or exit.
if (s != RUNNABLE && s != SLEEPING && s != ZOMBIE)
panic("RUNNING -> %u", s);
break;
case ZOMBIE:
// ZOMBIE is terminal; only wait()/reaping may dispose of the proc.
panic("ZOMBIE -> %u", s);
}
p->_state = s;
}
// Report the current lifecycle state of process p.
// Pure read-only accessor: takes no locks and has no side effects.
static inline enum procstate
get_proc_state(struct proc *p)
{
  enum procstate current = p->_state;
  return current;
}
void set_state(enum procstate s);
enum procstate get_state(void) const { return state_; }
private:
enum procstate state_; // Process state
};
......@@ -82,7 +82,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
LIST_INSERT_HEAD(&cv->waiters, myproc(), cv_waiters);
myproc()->oncv = cv;
set_proc_state(myproc(), SLEEPING);
myproc()->set_state(SLEEPING);
if (timeout) {
acquire(&sleepers_lock);
......@@ -112,9 +112,9 @@ cv_wakeup(struct condvar *cv)
acquire(&cv->lock);
LIST_FOREACH_SAFE(p, &cv->waiters, cv_waiters, tmp) {
acquire(&p->lock);
if (get_proc_state(p) != SLEEPING)
if (p->get_state() != SLEEPING)
panic("cv_wakeup: pid %u name %s state %u",
p->pid, p->name, get_proc_state(p));
p->pid, p->name, p->get_state());
if (p->oncv != cv)
panic("cv_wakeup: pid %u name %s p->cv %p cv %p",
p->pid, p->name, p->oncv, cv);
......
......@@ -23,7 +23,7 @@ idleloop(void)
sti();
for (;;) {
acquire(&myproc()->lock);
set_proc_state(myproc(), RUNNABLE);
myproc()->set_state(RUNNABLE);
sched();
if (steal() == 0) {
......
......@@ -20,7 +20,6 @@ proc_hash(const u32 &p)
return p;
}
//int __mpalign__ idle[NCPU];
xns<u32, proc*, proc_hash> *xnspid __mpalign__;
static struct proc *bootproc __mpalign__;
......@@ -30,12 +29,63 @@ struct kstack_tag kstack_tag[NCPU];
enum { sched_debug = 0 };
// Construct a process with the given pid.
// Every pointer and bookkeeping member starts zeroed/empty; the process
// begins life in the EMBRYO state and must be promoted to RUNNABLE via
// set_state() before it can be scheduled.
proc::proc(int npid) :
rcu_freed("proc"), vmap(0), brk(0), kstack(0),
pid(npid), parent(0), tf(0), context(0), killed(0),
cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0), state_(EMBRYO)
{
// lockname is "cv:proc:<pid>"; the condvar uses the full string while the
// spinlock name skips the leading "cv:" prefix (lockname+3).
snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
initlock(&lock, lockname+3, LOCKSTAT_PROC);
initcondvar(&cv, lockname);
// Zero the open-file table and every intrusive list/queue link so that
// later membership checks and list operations start from a defined state.
memset(&childq, 0, sizeof(childq));
memset(&child_next, 0, sizeof(child_next));
memset(ofile, 0, sizeof(ofile));
memset(&runqlink, 0, sizeof(runqlink));
memset(&cv_waiters, 0, sizeof(cv_waiters));
memset(&cv_sleep, 0, sizeof(cv_sleep));
}
// Tear down the per-process synchronization objects created in the
// constructor (spinlock, then condvar). Reached via RCU reclamation:
// do_gc() deletes the proc once it is safe to free.
proc::~proc(void)
{
destroylock(&lock);
destroycondvar(&cv);
}
// Transition this process to state s, enforcing the legal lifecycle:
//   EMBRYO   -> RUNNABLE
//   SLEEPING -> RUNNABLE
//   RUNNABLE -> RUNNABLE | RUNNING
//   RUNNING  -> RUNNABLE | SLEEPING | ZOMBIE
//   ZOMBIE   -> (terminal)
// Any other transition indicates a kernel bug and panics, naming the
// state we attempted to leave.
void
proc::set_state(enum procstate s)
{
  if (state_ == EMBRYO) {
    // Fresh processes may only be handed to the scheduler.
    if (s != RUNNABLE)
      panic("EMBRYO -> %u", s);
  } else if (state_ == SLEEPING) {
    // Sleepers are woken by marking them RUNNABLE, never RUNNING directly.
    if (s != RUNNABLE)
      panic("SLEEPING -> %u", s);
  } else if (state_ == RUNNABLE) {
    // RUNNABLE -> RUNNABLE is permitted (e.g. migration between run queues).
    if (s != RUNNING && s != RUNNABLE)
      panic("RUNNABLE -> %u", s);
  } else if (state_ == RUNNING) {
    // A running process may yield, block, or exit.
    if (s != RUNNABLE && s != SLEEPING && s != ZOMBIE)
      panic("RUNNING -> %u", s);
  } else if (state_ == ZOMBIE) {
    // Terminal: a zombie never transitions again.
    panic("ZOMBIE -> %u", s);
  }
  state_ = s;
}
// Give up the CPU for one scheduling round.
// Acquires the current process's lock, marks it RUNNABLE, and enters the
// scheduler; the lock is released on the far side of sched() (post_swtch).
//
// Fix: the diff residue left BOTH the old free-function call
// set_proc_state(myproc(), RUNNABLE) and its replacement in place, which
// double-applies the transition and references a function this change
// deletes. Keep only the member-function form.
void
yield(void)
{
  acquire(&myproc()->lock);  //DOC: yieldlock
  myproc()->set_state(RUNNABLE);
  sched();
}
......@@ -91,7 +141,7 @@ exit(void)
SLIST_FOREACH_SAFE(p, &(myproc()->childq), child_next, np) {
acquire(&p->lock);
p->parent = bootproc;
if(get_proc_state(p) == ZOMBIE)
if(p->get_state() == ZOMBIE)
wakeupinit = 1;
SLIST_REMOVE(&(myproc()->childq), p, proc, child_next);
release(&p->lock);
......@@ -110,7 +160,7 @@ exit(void)
cv_wakeup(&bootproc->cv);
// Jump into the scheduler, never to return.
set_proc_state(myproc(), ZOMBIE);
myproc()->set_state(ZOMBIE);
sched();
panic("zombie exit");
}
......@@ -299,7 +349,7 @@ kill(int pid)
}
acquire(&p->lock);
p->killed = 1;
if(get_proc_state(p) == SLEEPING){
if(p->get_state() == SLEEPING){
// XXX
// we need to wake p up if it is cv_sleep()ing.
// can't change p from SLEEPING to RUNNABLE since that
......@@ -333,9 +383,9 @@ procdumpall(void)
uptr pc[10];
for (proc *p : xnspid) {
if(get_proc_state(p) >= 0 && get_proc_state(p) < NELEM(states) &&
states[get_proc_state(p)])
state = states[get_proc_state(p)];
if(p->get_state() >= 0 && p->get_state() < NELEM(states) &&
states[p->get_state()])
state = states[p->get_state()];
else
state = "???";
......@@ -345,7 +395,7 @@ procdumpall(void)
cprintf("\n%-3d %-10s %8s %2u %lu\n",
p->pid, name, state, p->cpuid, p->tsc);
if(get_proc_state(p) == SLEEPING){
if(p->get_state() == SLEEPING){
getcallerpcs((void*)p->context->rbp, pc, NELEM(pc));
for(int i=0; i<10 && pc[i] != 0; i++)
cprintf(" %lx\n", pc[i]);
......@@ -424,7 +474,7 @@ wait(void)
SLIST_FOREACH_SAFE(p, &myproc()->childq, child_next, np) {
havekids = 1;
acquire(&p->lock);
if(get_proc_state(p) == ZOMBIE){
if(p->get_state() == ZOMBIE){
release(&p->lock); // noone else better be trying to lock p
pid = p->pid;
SLIST_REMOVE(&myproc()->childq, p, proc, child_next);
......
......@@ -26,7 +26,7 @@ static struct runq runq[NCPU] __mpalign__;
void
post_swtch(void)
{
if (get_proc_state(mycpu()->prev) == RUNNABLE &&
if (mycpu()->prev->get_state() == RUNNABLE &&
mycpu()->prev != idlep[mycpu()->id])
addrun(mycpu()->prev);
release(&mycpu()->prev->lock);
......@@ -45,13 +45,13 @@ sched(void)
#endif
if(mycpu()->ncli != 1)
panic("sched locks");
if(get_proc_state(myproc()) == RUNNING)
if(myproc()->get_state() == RUNNING)
panic("sched running");
if(readrflags()&FL_IF)
panic("sched interruptible");
intena = mycpu()->intena;
myproc()->curcycles += rdtsc() - myproc()->tsc;
if (get_proc_state(myproc()) == ZOMBIE)
if (myproc()->get_state() == ZOMBIE)
mtstop(myproc());
else
mtpause(myproc());
......@@ -59,25 +59,25 @@ sched(void)
struct proc *next = schednext();
if (next == nullptr) {
if (get_proc_state(myproc()) != RUNNABLE) {
if (myproc()->get_state() != RUNNABLE) {
next = idlep[mycpu()->id];
} else {
set_proc_state(myproc(), RUNNING);
myproc()->set_state(RUNNING);
mycpu()->intena = intena;
release(&myproc()->lock);
return;
}
}
if (get_proc_state(next) != RUNNABLE)
panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
if (next->get_state() != RUNNABLE)
panic("non-RUNNABLE next %s %u", next->name, next->get_state());
struct proc *prev = myproc();
mycpu()->proc = next;
mycpu()->prev = prev;
switchvm(next);
set_proc_state(next, RUNNING);
next->set_state(RUNNING);
next->tsc = rdtsc();
mtpause(next);
......@@ -99,7 +99,7 @@ addrun(struct proc *p)
// Always called with p->lock held
struct runq *q;
set_proc_state(p, RUNNABLE);
p->set_state(RUNNABLE);
q = &runq[p->cpuid];
acquire(&q->lock);
......@@ -141,7 +141,7 @@ steal(void)
if (tryacquire(&q->lock) == 0)
continue;
STAILQ_FOREACH(p, &q->q, runqlink) {
if (get_proc_state(p) == RUNNABLE && !p->cpu_pin &&
if (p->get_state() == RUNNABLE && !p->cpu_pin &&
p->curcycles != 0 && p->curcycles > VICTIMAGE)
{
STAILQ_REMOVE(&q->q, p, proc, runqlink);
......@@ -153,7 +153,7 @@ steal(void)
if (steal) {
acquire(&steal->lock);
if (get_proc_state(steal) == RUNNABLE && !steal->cpu_pin &&
if (steal->get_state() == RUNNABLE && !steal->cpu_pin &&
steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
{
steal->curcycles = 0;
......@@ -163,7 +163,7 @@ steal(void)
r = 1;
break;
}
if (get_proc_state(steal) == RUNNABLE)
if (steal->get_state() == RUNNABLE)
addrun(steal);
release(&steal->lock);
}
......
......@@ -179,7 +179,7 @@ trap(struct trapframe *tf)
// Force process to give up CPU on clock tick.
// If interrupts were on while locks held, would need to check nlock.
if(myproc() && get_proc_state(myproc()) == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
if(myproc() && myproc()->get_state() == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
yield();
// Check if the process has been killed since we yielded
......
......@@ -220,7 +220,6 @@ sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
safestrcpy(p->name, name, sizeof(p->name));
acquire(&p->lock);
set_proc_state(p, RUNNABLE);
addrun(p);
release(&p->lock);
......
您将添加 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论