Commit 6fdb1a06 authored by Nickolai Zeldovich

Merge branch 'scale-amd64' of git+ssh://pdos.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

@@ -27,16 +27,16 @@ OBJCOPY = $(TOOLPREFIX)objcopy
 STRIP = $(TOOLPREFIX)strip
 # XXX(sbw)
-# -nostdinc -nostdinc++
-COMFLAGS = -static -fno-builtin -fno-strict-aliasing -O2 -Wall \
-           -g -MD -m64 -Werror -fms-extensions -mno-sse -mcx16 \
-           -mno-red-zone -Iinclude -I$(QEMUSRC) -fno-omit-frame-pointer \
-           -DHW_$(HW) -include param.h -include include/compiler.h -DXV6
+# -nostdinc
+COMFLAGS = -static -g -MD -m64 -O2 -Wall -Werror -DHW_$(HW) -DXV6 \
+           -fno-builtin -fno-strict-aliasing -fno-omit-frame-pointer -fms-extensions \
+           -mno-sse -mcx16 -mno-red-zone -Iinclude -I$(QEMUSRC) \
+           -include param.h -include include/compiler.h
 COMFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
 CFLAGS := $(COMFLAGS) -std=c99 $(CFLAGS)
-CXXFLAGS := $(COMFLAGS) -std=c++0x -Wno-sign-compare -fno-exceptions -fno-rtti -fcheck-new $(CXXFLAGS)
-ASFLAGS = -Iinclude -m64 -gdwarf-2 -MD -DHW_$(HW) -include param.h
+CXXFLAGS := $(COMFLAGS) -std=c++0x -Wno-sign-compare -fno-exceptions -fno-rtti -fcheck-new -nostdinc++ $(CXXFLAGS)
+ASFLAGS = -Iinclude -I$(O)/include -m64 -gdwarf-2 -MD -DHW_$(HW) -include param.h
 LDFLAGS = -m elf_x86_64
 all:
...
@@ -129,6 +129,3 @@ struct taskstate
   (u16) ((rip)&0xffff), (cs), 0, bits, (u16) (((rip)>>16)&0xffff), \
   (u32) ((u64)(rip)>>32), 0, \
 }
-#define PROC_KSTACK_OFFSET 40
-#define TRAPFRAME_SIZE 192
@@ -41,7 +41,6 @@ struct proc : public rcu_freed {
   struct vmap *vmap;           // va -> vma
   uptr brk;                    // Top of heap
   char *kstack;                // Bottom of kernel stack for this process
-  enum procstate _state;       // Process state
   volatile int pid;            // Process ID
   struct proc *parent;         // Parent process
   struct trapframe *tf;        // Trap frame for current syscall
@@ -73,61 +72,15 @@ struct proc : public rcu_freed {
   LIST_ENTRY(proc) cv_waiters; // Linked list of processes waiting for oncv
   LIST_ENTRY(proc) cv_sleep;   // Linked list of processes sleeping on a cv
-  proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
-    _state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
-    cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
-    on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
-  {
-    snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
-    initlock(&lock, lockname+3, LOCKSTAT_PROC);
-    initcondvar(&cv, lockname);
-    memset(&childq, 0, sizeof(childq));
-    memset(&child_next, 0, sizeof(child_next));
-    memset(ofile, 0, sizeof(ofile));
-    memset(&runqlink, 0, sizeof(runqlink));
-    memset(&cv_waiters, 0, sizeof(cv_waiters));
-    memset(&cv_sleep, 0, sizeof(cv_sleep));
-  }
-
-  ~proc() {
-    destroylock(&lock);
-    destroycondvar(&cv);
-  }
-
-  virtual void do_gc() { delete this; }
+  proc(int npid);
+  ~proc(void);
+
+  virtual void do_gc(void) { delete this; }

   NEW_DELETE_OPS(proc)
-};
-
-static inline void
-set_proc_state(struct proc *p, enum procstate s)
-{
-  switch(p->_state) {
-  case EMBRYO:
-    if (s != RUNNABLE)
-      panic("EMBRYO -> %u", s);
-    break;
-  case SLEEPING:
-    if (s != RUNNABLE)
-      panic("SLEEPING -> %u", s);
-    break;
-  case RUNNABLE:
-    if (s != RUNNING && s != RUNNABLE)
-      panic("RUNNABLE -> %u", s);
-    break;
-  case RUNNING:
-    if (s != RUNNABLE && s != SLEEPING && s != ZOMBIE)
-      panic("RUNNING -> %u", s);
-    break;
-  case ZOMBIE:
-    panic("ZOMBIE -> %u", s);
-  }
-  p->_state = s;
-}
-
-static inline enum procstate
-get_proc_state(struct proc *p)
-{
-  return p->_state;
-}
+
+  void set_state(enum procstate s);
+  enum procstate get_state(void) const { return state_; }
+
+private:
+  enum procstate state_;       // Process state
+};
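Note: the state field is now private (state_) and every read or write goes through get_state()/set_state(), so the transition checks in set_state() cannot be bypassed. A minimal sketch of the call pattern callers are expected to follow, assuming p->lock is held as elsewhere in this diff; the helper make_runnable below is hypothetical and not part of the commit:

// Sketch only -- illustrates the new accessor usage; not part of the commit.
void
make_runnable(struct proc *p)
{
  acquire(&p->lock);                // state changes happen under p->lock
  if (p->get_state() == SLEEPING)   // read through the accessor, not a raw field
    p->set_state(RUNNABLE);         // set_state() panics on an illegal transition
  release(&p->lock);
}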
@@ -79,5 +79,12 @@ $(O)/kernel/%: kernel/%.S
 	$(Q)$(LD) $(LDFLAGS) -N -e start -Ttext $(TTEXT) -o $@.out $@.o
 	$(Q)$(OBJCOPY) -S -O binary $@.out $@

+$(O)/include/asmdefines.h: kernel/asmdefines.c
+	@echo " GEN $@"
+	$(Q)mkdir -p $(@D)
+	$(Q)$(CXX) -S $(CXXFLAGS) -o $(O)/kernel/asmdefines.S kernel/asmdefines.c
+	$(Q)sed -n 's/remove\$$//p' $(O)/kernel/asmdefines.S > $@
+$(O)/kernel/trapasm.o: $(O)/include/asmdefines.h
+
 .PRECIOUS: $(O)/kernel/%.o
 -include $(O)/kernel/*.d
+#include "types.h"
+#include "kernel.hh"
+#include "spinlock.h"
+#include "amd64.h"
+#include "condvar.h"
+#include "proc.hh"
+
+#define DEFINE(sym, val) \
+  asm volatile ("\n#define " #sym " remove%0 " : : "i" (val))
+
+void
+asmdefines(void)
+{
+  DEFINE(PROC_KSTACK_OFFSET, __offsetof(struct proc, kstack));
+  DEFINE(TRAPFRAME_SIZE, sizeof(trapframe));
+}
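This is the new kernel/asmdefines.c consumed by the $(O)/include/asmdefines.h rule above. The DEFINE macro makes the compiler emit a line of the form "#define SYM remove$<value>" into the generated assembly, with the value filled in as an immediate; the sed -n 's/remove\$//p' step keeps only those lines and strips the remove$ marker, producing a header whose constants always track the real struct layout. This replaces the hand-maintained values removed from mmu.h and the runtime mismatch checks removed from cmain(). Illustrative content of the generated header (the actual numbers come from the compiler; 40 and 192 are simply the values the removed mmu.h defines had hard-coded):

/* Example of the generated $(O)/include/asmdefines.h -- illustrative only. */
#define PROC_KSTACK_OFFSET 40
#define TRAPFRAME_SIZE 192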
@@ -82,7 +82,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
   LIST_INSERT_HEAD(&cv->waiters, myproc(), cv_waiters);
   myproc()->oncv = cv;
-  set_proc_state(myproc(), SLEEPING);
+  myproc()->set_state(SLEEPING);
   if (timeout) {
     acquire(&sleepers_lock);
@@ -112,9 +112,9 @@ cv_wakeup(struct condvar *cv)
   acquire(&cv->lock);
   LIST_FOREACH_SAFE(p, &cv->waiters, cv_waiters, tmp) {
     acquire(&p->lock);
-    if (get_proc_state(p) != SLEEPING)
+    if (p->get_state() != SLEEPING)
       panic("cv_wakeup: pid %u name %s state %u",
-            p->pid, p->name, get_proc_state(p));
+            p->pid, p->name, p->get_state());
     if (p->oncv != cv)
       panic("cv_wakeup: pid %u name %s p->cv %p cv %p",
             p->pid, p->name, p->oncv, cv);
...
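With the accessor change, cv_sleepto() marks the caller SLEEPING via set_state() and cv_wakeup() sanity-checks get_state() before waking a process. A hedged sketch of the wait/wakeup pattern these functions assume: the names my_lock, my_cv and ready are hypothetical, a timeout of 0 means no timeout as in the hunk above, and (following the xv6 sleep convention) cv_sleepto is expected to release the lock while blocked and reacquire it before returning.

// Sketch only, not part of the commit.
struct spinlock my_lock;   // hypothetical lock protecting ready
struct condvar my_cv;      // hypothetical condition variable
static int ready;

void
wait_for_ready(void)
{
  acquire(&my_lock);
  while (!ready)                      // re-check the condition after every wakeup
    cv_sleepto(&my_cv, &my_lock, 0);  // caller becomes SLEEPING inside cv_sleepto
  release(&my_lock);
}

void
signal_ready(void)
{
  acquire(&my_lock);
  ready = 1;
  release(&my_lock);
  cv_wakeup(&my_cv);                  // wakes waiters that are still SLEEPING
}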
@@ -23,7 +23,7 @@ idleloop(void)
   sti();
   for (;;) {
     acquire(&myproc()->lock);
-    set_proc_state(myproc(), RUNNABLE);
+    myproc()->set_state(RUNNABLE);
     sched();
     if (steal() == 0) {
...
@@ -105,13 +105,6 @@ cmain(u64 mbmagic, u64 mbaddr)
   kpml4.e[0] = 0;  // don't need 1 GB identity mapping anymore
   lcr3(rcr3());
-  if (PROC_KSTACK_OFFSET != __offsetof(struct proc, kstack))
-    panic("PROC_KSTACK_OFFSET mismatch: %d %ld\n",
-          PROC_KSTACK_OFFSET, __offsetof(struct proc, kstack));
-  if (TRAPFRAME_SIZE != sizeof(trapframe))
-    panic("TRAPFRAME_SIZE mismatch: %d %ld\n",
-          TRAPFRAME_SIZE, sizeof(trapframe));
   idleloop();
   panic("Unreachable");
...
@@ -20,7 +20,6 @@ proc_hash(const u32 &p)
   return p;
 }
-//int __mpalign__ idle[NCPU];
 xns<u32, proc*, proc_hash> *xnspid __mpalign__;
 static struct proc *bootproc __mpalign__;
@@ -30,12 +29,63 @@ struct kstack_tag kstack_tag[NCPU];
 enum { sched_debug = 0 };

+proc::proc(int npid) :
+  rcu_freed("proc"), vmap(0), brk(0), kstack(0),
+  pid(npid), parent(0), tf(0), context(0), killed(0),
+  cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
+  on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0), state_(EMBRYO)
+{
+  snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
+  initlock(&lock, lockname+3, LOCKSTAT_PROC);
+  initcondvar(&cv, lockname);
+  memset(&childq, 0, sizeof(childq));
+  memset(&child_next, 0, sizeof(child_next));
+  memset(ofile, 0, sizeof(ofile));
+  memset(&runqlink, 0, sizeof(runqlink));
+  memset(&cv_waiters, 0, sizeof(cv_waiters));
+  memset(&cv_sleep, 0, sizeof(cv_sleep));
+}
+
+proc::~proc(void)
+{
+  destroylock(&lock);
+  destroycondvar(&cv);
+}
+
+void
+proc::set_state(enum procstate s)
+{
+  switch(state_) {
+  case EMBRYO:
+    if (s != RUNNABLE)
+      panic("EMBRYO -> %u", s);
+    break;
+  case SLEEPING:
+    if (s != RUNNABLE)
+      panic("SLEEPING -> %u", s);
+    break;
+  case RUNNABLE:
+    if (s != RUNNING && s != RUNNABLE)
+      panic("RUNNABLE -> %u", s);
+    break;
+  case RUNNING:
+    if (s != RUNNABLE && s != SLEEPING && s != ZOMBIE)
+      panic("RUNNING -> %u", s);
+    break;
+  case ZOMBIE:
+    panic("ZOMBIE -> %u", s);
+  }
+  state_ = s;
+}
+
 // Give up the CPU for one scheduling round.
 void
 yield(void)
 {
   acquire(&myproc()->lock);  //DOC: yieldlock
-  set_proc_state(myproc(), RUNNABLE);
+  myproc()->set_state(RUNNABLE);
   sched();
 }
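proc::set_state() is now the single place that enforces the scheduler's state machine; the allowed transitions are exactly the ones the old free function set_proc_state() permitted. Restated as a predicate for quick reference (sketch only, not part of the commit):

// Equivalent restatement of the checks in proc::set_state() above.
static inline bool
state_transition_ok(enum procstate from, enum procstate to)
{
  switch (from) {
  case EMBRYO:   return to == RUNNABLE;
  case SLEEPING: return to == RUNNABLE;
  case RUNNABLE: return to == RUNNABLE || to == RUNNING;
  case RUNNING:  return to == RUNNABLE || to == SLEEPING || to == ZOMBIE;
  case ZOMBIE:   return false;               // a zombie never changes state
  }
  return false;
}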
@@ -91,7 +141,7 @@ exit(void)
   SLIST_FOREACH_SAFE(p, &(myproc()->childq), child_next, np) {
     acquire(&p->lock);
     p->parent = bootproc;
-    if(get_proc_state(p) == ZOMBIE)
+    if(p->get_state() == ZOMBIE)
       wakeupinit = 1;
     SLIST_REMOVE(&(myproc()->childq), p, proc, child_next);
     release(&p->lock);
@@ -110,7 +160,7 @@ exit(void)
     cv_wakeup(&bootproc->cv);
   // Jump into the scheduler, never to return.
-  set_proc_state(myproc(), ZOMBIE);
+  myproc()->set_state(ZOMBIE);
   sched();
   panic("zombie exit");
 }
@@ -299,7 +349,7 @@ kill(int pid)
   }
   acquire(&p->lock);
   p->killed = 1;
-  if(get_proc_state(p) == SLEEPING){
+  if(p->get_state() == SLEEPING){
     // XXX
     // we need to wake p up if it is cv_sleep()ing.
     // can't change p from SLEEPING to RUNNABLE since that
@@ -333,9 +383,9 @@ procdumpall(void)
   uptr pc[10];
   for (proc *p : xnspid) {
-    if(get_proc_state(p) >= 0 && get_proc_state(p) < NELEM(states) &&
-       states[get_proc_state(p)])
-      state = states[get_proc_state(p)];
+    if(p->get_state() >= 0 && p->get_state() < NELEM(states) &&
+       states[p->get_state()])
+      state = states[p->get_state()];
     else
       state = "???";
@@ -345,7 +395,7 @@ procdumpall(void)
     cprintf("\n%-3d %-10s %8s %2u %lu\n",
             p->pid, name, state, p->cpuid, p->tsc);
-    if(get_proc_state(p) == SLEEPING){
+    if(p->get_state() == SLEEPING){
       getcallerpcs((void*)p->context->rbp, pc, NELEM(pc));
       for(int i=0; i<10 && pc[i] != 0; i++)
         cprintf(" %lx\n", pc[i]);
@@ -424,7 +474,7 @@ wait(void)
     SLIST_FOREACH_SAFE(p, &myproc()->childq, child_next, np) {
       havekids = 1;
       acquire(&p->lock);
-      if(get_proc_state(p) == ZOMBIE){
+      if(p->get_state() == ZOMBIE){
         release(&p->lock);  // noone else better be trying to lock p
         pid = p->pid;
         SLIST_REMOVE(&myproc()->childq, p, proc, child_next);
...
@@ -26,7 +26,7 @@ static struct runq runq[NCPU] __mpalign__;
 void
 post_swtch(void)
 {
-  if (get_proc_state(mycpu()->prev) == RUNNABLE &&
+  if (mycpu()->prev->get_state() == RUNNABLE &&
       mycpu()->prev != idlep[mycpu()->id])
     addrun(mycpu()->prev);
   release(&mycpu()->prev->lock);
@@ -45,13 +45,13 @@ sched(void)
 #endif
   if(mycpu()->ncli != 1)
     panic("sched locks");
-  if(get_proc_state(myproc()) == RUNNING)
+  if(myproc()->get_state() == RUNNING)
     panic("sched running");
   if(readrflags()&FL_IF)
     panic("sched interruptible");
   intena = mycpu()->intena;
   myproc()->curcycles += rdtsc() - myproc()->tsc;
-  if (get_proc_state(myproc()) == ZOMBIE)
+  if (myproc()->get_state() == ZOMBIE)
     mtstop(myproc());
   else
     mtpause(myproc());
@@ -59,25 +59,25 @@ sched(void)
   struct proc *next = schednext();
   if (next == nullptr) {
-    if (get_proc_state(myproc()) != RUNNABLE) {
+    if (myproc()->get_state() != RUNNABLE) {
       next = idlep[mycpu()->id];
     } else {
-      set_proc_state(myproc(), RUNNING);
+      myproc()->set_state(RUNNING);
       mycpu()->intena = intena;
       release(&myproc()->lock);
       return;
     }
   }
-  if (get_proc_state(next) != RUNNABLE)
-    panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
+  if (next->get_state() != RUNNABLE)
+    panic("non-RUNNABLE next %s %u", next->name, next->get_state());
   struct proc *prev = myproc();
   mycpu()->proc = next;
   mycpu()->prev = prev;
   switchvm(next);
-  set_proc_state(next, RUNNING);
+  next->set_state(RUNNING);
   next->tsc = rdtsc();
   mtpause(next);
@@ -99,7 +99,7 @@ addrun(struct proc *p)
   // Always called with p->lock held
   struct runq *q;
-  set_proc_state(p, RUNNABLE);
+  p->set_state(RUNNABLE);
   q = &runq[p->cpuid];
   acquire(&q->lock);
@@ -141,7 +141,7 @@ steal(void)
     if (tryacquire(&q->lock) == 0)
       continue;
     STAILQ_FOREACH(p, &q->q, runqlink) {
-      if (get_proc_state(p) == RUNNABLE && !p->cpu_pin &&
+      if (p->get_state() == RUNNABLE && !p->cpu_pin &&
          p->curcycles != 0 && p->curcycles > VICTIMAGE)
      {
        STAILQ_REMOVE(&q->q, p, proc, runqlink);
@@ -153,7 +153,7 @@ steal(void)
     if (steal) {
       acquire(&steal->lock);
-      if (get_proc_state(steal) == RUNNABLE && !steal->cpu_pin &&
+      if (steal->get_state() == RUNNABLE && !steal->cpu_pin &&
          steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
      {
        steal->curcycles = 0;
@@ -163,7 +163,7 @@ steal(void)
         r = 1;
         break;
       }
-      if (get_proc_state(steal) == RUNNABLE)
+      if (steal->get_state() == RUNNABLE)
         addrun(steal);
       release(&steal->lock);
     }
...
@@ -179,7 +179,7 @@ trap(struct trapframe *tf)
   // Force process to give up CPU on clock tick.
   // If interrupts were on while locks held, would need to check nlock.
-  if(myproc() && get_proc_state(myproc()) == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
+  if(myproc() && myproc()->get_state() == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
     yield();
   // Check if the process has been killed since we yielded
...
 #include "mmu.h"
+#include "asmdefines.h"
 #define NOEC pushq $0
 #define EC
...
@@ -220,7 +220,6 @@ sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
   safestrcpy(p->name, name, sizeof(p->name));
   acquire(&p->lock);
-  set_proc_state(p, RUNNABLE);
   addrun(p);
   release(&p->lock);
...