Fix up code that communicates process stacks to mtrace

Parent c23a185e
@@ -18,6 +18,8 @@ static struct proc *initproc __attribute__ ((aligned (CACHELINE)));
 extern void forkret(void);
 extern void trapret(void);
 
+struct kstack_tag kstack_tag[NCPU];
+
 enum { sched_debug = 0 };
 
 void
@@ -51,7 +53,6 @@ allocproc(void)
   p = kmalloc(sizeof(struct proc));
   if (p == 0) return 0;
   memset(p, 0, sizeof(*p));
-  p->state = EMBRYO;
   p->state = EMBRYO;
   p->pid = ns_allockey(nspid);
@@ -59,6 +60,7 @@ allocproc(void)
   p->cpuid = cpu->id;
   p->on_runq = -1;
   p->cpu_pin = 0;
+  p->mtrace_stacks.curr = -1;
   snprintf(p->lockname, sizeof(p->lockname), "cv:proc:%d", p->pid);
   initlock(&p->lock, p->lockname+3);
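A note on the initializer above: curr == -1 is the empty-stack sentinel, so trap() can later test curr >= 0 to decide whether an interrupted kernel stack needs to be paused. A minimal user-space sketch of the convention (hypothetical harness, not part of the commit):

#include <assert.h>

#define MTRACE_NSTACKS 16

struct mtrace_stacks {
  int curr;                            /* index of top entry; -1 == empty */
  unsigned long tag[MTRACE_NSTACKS];
};

int main(void)
{
  struct mtrace_stacks s = { .curr = -1 };  /* as allocproc() now does */

  assert(s.curr < 0);          /* empty: trap() would skip the pause */
  s.tag[++s.curr] = 1;         /* first mtrace_kstack_start() fills slot 0 */
  assert(s.curr == 0);         /* active: trap() would pause this stack */
  return 0;
}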
@@ -524,7 +526,7 @@ scheduler(void)
   // Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
   // mtrace_call_set(1, cpu->id);
-  mtrace_kstack_register(scheduler, mtrace_start, schedp->pid);
+  mtrace_kstack_start(scheduler, schedp);
 
   for(;;){
     // Enable interrupts on this processor.
@@ -547,15 +549,15 @@ scheduler(void)
       p->state = RUNNING;
       p->tsc = rdtsc();
 
-      mtrace_kstack_register(NULL, mtrace_pause, schedp->pid);
+      mtrace_kstack_pause(schedp);
       if (p->context->eip != (uint)forkret &&
           p->context->eip != (uint)rcu_gc_worker)
       {
-        mtrace_kstack_register(NULL, mtrace_resume, 0);
+        mtrace_kstack_resume(proc);
       }
       mtrace_call_set(1, cpu->id);
       swtch(&cpu->scheduler, proc->context);
-      mtrace_kstack_register(NULL, mtrace_resume, schedp->pid);
+      mtrace_kstack_resume(schedp);
       mtrace_call_set(0, cpu->id);
 
       switchkvm();
@@ -609,9 +611,9 @@ sched(void)
   intena = cpu->intena;
   proc->curcycles += rdtsc() - proc->tsc;
   if (proc->state == ZOMBIE)
-    mtrace_kstack_register(NULL, mtrace_done, 0);
+    mtrace_kstack_stop(proc);
   else
-    mtrace_kstack_register(NULL, mtrace_pause, 0);
+    mtrace_kstack_pause(proc);
   mtrace_call_set(0, cpu->id);
   swtch(&proc->context, cpu->scheduler);
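The ZOMBIE/else split above matters: an exiting process is never resumed, so its top tag is popped with stop, while a process that merely yields keeps its tag stack intact across the pause. A toy illustration of the difference (user-space stand-ins for the real helpers):

#include <assert.h>

#define MTRACE_NSTACKS 16

struct mtrace_stacks { int curr; unsigned long tag[MTRACE_NSTACKS]; };

/* Stand-ins: stop pops the top tag for good, pause leaves it in place. */
static void kstack_stop(struct mtrace_stacks *s)  { s->tag[s->curr--] = 0; }
static void kstack_pause(struct mtrace_stacks *s) { (void)s; }

int main(void)
{
  struct mtrace_stacks s = { .curr = 0, .tag = { 42 } };

  kstack_pause(&s);            /* yielding process: tag survives the switch */
  assert(s.curr == 0);

  kstack_stop(&s);             /* ZOMBIE: tag is popped, never resumed */
  assert(s.curr == -1);
  return 0;
}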
@@ -640,9 +642,9 @@ forkret(void)
   // b/c file system code needs a process context
   // in which to call cv_sleep().
   if(proc->cwd == 0) {
-    mtrace_kstack_register(forkret, mtrace_start, 0);
+    mtrace_kstack_start(forkret, proc);
     proc->cwd = namei("/");
-    mtrace_kstack_register(NULL, mtrace_done, 0);
+    mtrace_kstack_stop(proc);
   }
 
   // Return to "caller", actually trapret (see allocproc).
@@ -72,6 +72,18 @@ struct vmap {
   char lockname[16];
 };
 
+// Per-process, per-stack meta data for mtrace
+#define MTRACE_NSTACKS 16
+#define MTRACE_TAGSHIFT 28
+#if NCPU > 16
+#error Oops -- decrease MTRACE_TAGSHIFT
+#endif
+
+struct mtrace_stacks {
+  int curr;
+  unsigned long tag[MTRACE_NSTACKS];
+};
+
 // Per-process state
 struct proc {
   struct vmap *vmap;   // va -> vma
@@ -100,6 +112,7 @@ struct proc {
   char lockname[16];
   int on_runq;
   int cpu_pin;
+  struct mtrace_stacks mtrace_stacks;
 };
 
 // Process memory is laid out contiguously, low addresses first:
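A side note on the constants above: each tag packs a per-CPU sequence number in the low MTRACE_TAGSHIFT bits and the CPU id in the bits above them, so with a shift of 28 only CPU ids 0-15 fit below bit 32 of a 32-bit value, which is what the #error guards. A standalone sketch of the composition (hypothetical make_tag helper):

#include <assert.h>
#include <stdio.h>

#define MTRACE_TAGSHIFT 28

/* Mirrors: new_tag = ++(kstack_tag[cpu->id].val) | (cpu->id << MTRACE_TAGSHIFT) */
static unsigned long
make_tag(int cpuid, int *seq)
{
  return (unsigned long)++(*seq) | ((unsigned long)cpuid << MTRACE_TAGSHIFT);
}

int main(void)
{
  int seq0 = 0, seq1 = 0;
  unsigned long a = make_tag(0, &seq0);      /* CPU 0, sequence 1 */
  unsigned long b = make_tag(1, &seq1);      /* CPU 1, sequence 1 */

  assert(a != b);                            /* same sequence, distinct CPUs */
  assert((int)(b >> MTRACE_TAGSHIFT) == 1);  /* CPU id is recoverable */
  printf("a=%#lx b=%#lx\n", a, b);
  return 0;
}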
@@ -116,7 +116,7 @@ rcu_gc_worker(void)
 {
   release(&proc->lock);  // initially held by scheduler
 
-  mtrace_kstack_register(rcu_gc_worker, mtrace_start, 0);
+  mtrace_kstack_start(rcu_gc_worker, proc);
 
   struct spinlock wl;
   initlock(&wl, "rcu_gc_worker");  // dummy lock
@@ -145,10 +145,10 @@ syscall(void)
   num = proc->tf->eax;
   if(num >= 0 && num < NELEM(syscalls) && syscalls[num]) {
-    mtrace_kstack_register(syscalls[num], mtrace_start, 0);
+    mtrace_kstack_start(syscalls[num], proc);
     mtrace_call_set(1, cpunum());
     proc->tf->eax = syscalls[num]();
-    mtrace_kstack_register(NULL, mtrace_done, 0);
+    mtrace_kstack_stop(proc);
     mtrace_call_set(0, cpunum());
   } else {
     cprintf("%d %s: unknown sys call %d\n",
@@ -9,6 +9,7 @@
 #include "proc.h"
 #include "x86.h"
 #include "traps.h"
+#include "xv6-kmtrace.h"
 
 // Interrupt descriptor table (shared by all CPUs).
 struct gatedesc idt[256];
@@ -49,6 +50,11 @@ trap(struct trapframe *tf)
       exit();
     return;
   }
+
+  if (proc->mtrace_stacks.curr >= 0)
+    mtrace_kstack_pause(proc);
+  mtrace_kstack_start(trap, proc);
+
   switch(tf->trapno){
   case T_IRQ0 + IRQ_TIMER:
     if(cpu->id == 0){
@@ -103,6 +109,9 @@ trap(struct trapframe *tf)
   if(tf->trapno == T_PGFLT){
     if(pagefault(proc->vmap, rcr2(), tf->err) >= 0){
+      mtrace_kstack_stop(proc);
+      if (proc->mtrace_stacks.curr >= 0)
+        mtrace_kstack_resume(proc);
       return;
     }
   }
@@ -129,4 +138,8 @@ trap(struct trapframe *tf)
   // Check if the process has been killed since we yielded
   if(proc && proc->killed && (tf->cs&3) == DPL_USER)
     exit();
+
+  mtrace_kstack_stop(proc);
+  if (proc->mtrace_stacks.curr >= 0)
+    mtrace_kstack_resume(proc);
 }
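Put together, trap() treats the tag stack as a proper stack: if a kernel stack was active when the interrupt fired it is paused, trap pushes its own tag, pops it on every exit path, and resumes the interrupted one. A toy model of that nesting discipline (user-space stand-ins; the real helpers also emit mtrace events):

#include <assert.h>

#define MTRACE_NSTACKS 16

struct mtrace_stacks { int curr; unsigned long tag[MTRACE_NSTACKS]; };

static void kstack_start(struct mtrace_stacks *s, unsigned long t)
{ s->tag[++s->curr] = t; }
static void kstack_stop(struct mtrace_stacks *s)
{ s->tag[s->curr--] = 0; }

int main(void)
{
  struct mtrace_stacks s = { .curr = -1 };

  kstack_start(&s, 1);             /* a syscall's stack is active */
  int interrupted = s.curr >= 0;   /* trap(): pause the active stack */
  kstack_start(&s, 2);             /* mtrace_kstack_start(trap, proc) */
  /* ... handle the trap ... */
  kstack_stop(&s);                 /* mtrace_kstack_stop(proc) */
  if (interrupted)
    assert(s.curr == 0);           /* resume: back on the syscall's tag */
  return 0;
}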
#include "xv6-mtrace.h"
static inline void mtrace_kstack_register(void *eip,
mtrace_call_state_t state,
int pid)
struct kstack_tag {
int val __attribute__((aligned (CACHELINE)));
};
extern struct kstack_tag kstack_tag[NCPU];
static inline void mtrace_kstack_start(void *eip,
struct proc *p)
{
if (pid == 0)
pid = proc->pid;
mtrace_fcall_register(pid, (unsigned long)eip, pid, 0, state);
unsigned long new_tag;
int i;
pushcli();
xchg((uint *)&i, 0);
new_tag = ++(kstack_tag[cpu->id].val) | (cpu->id << MTRACE_TAGSHIFT);
i = ++p->mtrace_stacks.curr;
if (i >= MTRACE_NSTACKS)
panic("mtrace_kstack_start: ran out of slots");
p->mtrace_stacks.tag[i] = new_tag;
mtrace_fcall_register(p->pid, (unsigned long)eip,
p->mtrace_stacks.tag[i], i, mtrace_start);
popcli();
}
static inline void mtrace_kstack_stop(struct proc *p)
{
int i;
pushcli();
i = p->mtrace_stacks.curr;
if (i < 0)
panic("mtrace_kstack_stop: fell off of stack");
mtrace_fcall_register(p->pid, 0, p->mtrace_stacks.tag[i], i, mtrace_done);
p->mtrace_stacks.tag[i] = 0;
p->mtrace_stacks.curr--;
popcli();
}
static inline void mtrace_kstack_pause(struct proc *p)
{
int i;
i = p->mtrace_stacks.curr;
if (i < 0)
panic("mtrace_kstack_pause: bad stack");
mtrace_fcall_register(p->pid, 0, p->mtrace_stacks.tag[i], i, mtrace_pause);
}
static inline void mtrace_kstack_resume(struct proc *p)
{
int i;
i = p->mtrace_stacks.curr;
if (i < 0)
panic("mtrace_kstack_resume: bad stack");
mtrace_fcall_register(p->pid, 0, p->mtrace_stacks.tag[i], i, mtrace_resume);
}
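For reference, the calling convention these helpers establish, mirroring the syscall() hunk above. This is a sketch of a hypothetical call site (example_worker is not in the commit), assuming a valid proc context:

// Hypothetical kernel worker showing the start/stop bracketing.
static void
example_worker(void)
{
  mtrace_kstack_start(example_worker, proc);  // push a fresh tag, emit mtrace_start
  // ... work attributed to this kernel stack ...
  mtrace_kstack_stop(proc);                   // emit mtrace_done, pop the tag
}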