提交 1be8db63 · 作者: Silas Boyd-Wickizer

Return of the Mtrace.

上级 781658d0
......@@ -19,5 +19,5 @@ mkfs
/mscan.out
/mscan.sorted
/mscan.syms
/mscan.kern
/mscan.kernel
config.mk
......@@ -2,22 +2,23 @@
# XXX compiling user progs with -mcmodel=kernel
#
# Custom config file? Set the default below..
# Custom config file? Otherwise use defaults.
-include config.mk
TOOLPREFIX ?= x86_64-jos-elf-
QEMU ?= qemu-system-x86_64
CPUS ?= 4
QEMUSMP ?= 4
QEMUSRC ?= ../mtrace
MTRACE ?= $(QEMU)
NM = $(TOOLPREFIX)nm
#CC = $(TOOLPREFIX)clang
CC = $(TOOLPREFIX)clang
CC = $(TOOLPREFIX)gcc
AS = $(TOOLPREFIX)gas
LD = $(TOOLPREFIX)ld
NM = $(TOOLPREFIX)nm
OBJCOPY = $(TOOLPREFIX)objcopy
OBJDUMP = $(TOOLPREFIX)objdump
CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m64 \
-Werror -std=c99 -fms-extensions -mno-sse -mcmodel=kernel
CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb \
-m64 -Werror -std=c99 -fms-extensions -mno-sse -mcmodel=kernel -I$(QEMUSRC)
CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
ASFLAGS = -m64 -gdwarf-2
LDFLAGS += -m elf_x86_64
......@@ -91,18 +92,29 @@ mkfs: mkfs.c fs.h
fs.img: mkfs README $(UPROGS)
./mkfs fs.img README $(UPROGS)
mscan.syms: kernel
$(NM) -S $< > $@
mscan.kernel: kernel
cp $< $@
-include *.d
.PHONY: clean qemu gdb ud0
clean:
rm -f *.o *.d *.asm *.sym initcode kernel bootother mkfs fs.img
QEMUOPTS = -smp $(QEMUSMP) -m 512 -serial mon:stdio -nographic
MTRACEOPTS = -rtc clock=vm -mtrace-enable -mtrace-file mtrace.out \
-mtrace-quantum 100
QEMUOPTS = -smp $(CPUS) -m 512 -serial mon:stdio -nographic
qemu: kernel
$(QEMU) $(QEMUOPTS) -kernel kernel
gdb: kernel
$(QEMU) $(QEMUOPTS) -kernel kernel -S -s
mtrace: mscan.kernel mscan.syms
$(MTRACE) $(QEMUOPTS) $(MTRACEOPTS) -kernel kernel
ud0: kernel
rsync -avP kernel amsterdam.csail.mit.edu:/tftpboot/ud0/kernel.xv6
clean:
rm -f *.o *.d *.asm *.sym initcode kernel bootother mkfs fs.img
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#include "amd64.h"
#include "param.h"
#include "fcntl.h"
......
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#define NCHILD 2
#define NDEPTH 7
......
......@@ -4,7 +4,7 @@
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#define N 100
#define DEBUG 0
......
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#define NCHILD 2
#define NDEPTH 5
......
......@@ -8,7 +8,7 @@
#include "kernel.h"
#include "spinlock.h"
#include "kalloc.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#include "cpu.h"
struct kmem kmems[NCPU];
......@@ -55,12 +55,7 @@ kfree_pool(struct kmem *m, char *v)
m->freelist = r;
m->nfree++;
if (kinited)
mtrace_label_register(mtrace_label_block,
r,
0,
0,
0,
RET_EIP());
mtunlabel(mtrace_label_block, r);
release(&m->lock);
}
......@@ -101,12 +96,7 @@ kmemalloc(struct kmem *km)
return 0;
}
mtrace_label_register(mtrace_label_block,
r,
m->size,
"kalloc",
sizeof("kalloc"),
RET_EIP());
mtlabel(mtrace_label_block, r, m->size, "kalloc", sizeof("kalloc"));
if (kalloc_memset)
memset(r, 2, m->size);
......@@ -224,12 +214,7 @@ kmfree(void *ap)
{
acquire(&freelists[mycpu()->id].lock);
domfree(ap);
mtrace_label_register(mtrace_label_heap,
ap,
0,
0,
0,
RET_EIP());
mtunlabel(mtrace_label_heap, ap);
release(&freelists[mycpu()->id].lock);
}
......@@ -289,12 +274,7 @@ kmalloc(u64 nbytes)
release(&freelists[mycpu()->id].lock);
if (r)
mtrace_label_register(mtrace_label_heap,
r,
nbytes,
"kmalloc'ed",
sizeof("kmalloc'ed"),
RET_EIP());
mtlabel(mtrace_label_heap, r, nbytes, "kmalloc'ed", sizeof("kmalloc'ed"));
return r;
}
......
#include "xv6-mtrace.h"
#include "mtrace.h"
#if MTRACE
// Tell mtrace about switching threads
struct kstack_tag {
int val __attribute__((aligned (CACHELINE)));
int val __mpalign__;
};
extern struct kstack_tag kstack_tag[NCPU];
static inline void mtrace_kstack_start(void *eip,
struct proc *p)
static inline void mtstart(void *ip, struct proc *p)
{
unsigned long new_tag;
int i;
pushcli();
xchg((uint *)&i, 0);
new_tag = ++(kstack_tag[cpu->id].val) | (cpu->id << MTRACE_TAGSHIFT);
new_tag = ++(kstack_tag[mycpu()->id].val) | (mycpu()->id<<MTRACE_TAGSHIFT);
i = ++p->mtrace_stacks.curr;
if (i >= MTRACE_NSTACKS)
panic("mtrace_kstack_start: ran out of slots");
p->mtrace_stacks.tag[i] = new_tag;
mtrace_fcall_register(p->pid, (unsigned long)eip,
mtrace_fcall_register(p->pid, (unsigned long)ip,
p->mtrace_stacks.tag[i], i, mtrace_start);
popcli();
}
static inline void mtrace_kstack_stop(struct proc *p)
static inline void mtstop(struct proc *p)
{
int i;
pushcli();
......@@ -36,7 +35,7 @@ static inline void mtrace_kstack_stop(struct proc *p)
popcli();
}
static inline void mtrace_kstack_pause(struct proc *p)
static inline void mtpause(struct proc *p)
{
int i;
......@@ -46,7 +45,7 @@ static inline void mtrace_kstack_pause(struct proc *p)
mtrace_fcall_register(p->pid, 0, p->mtrace_stacks.tag[i], i, mtrace_pause);
}
static inline void mtrace_kstack_resume(struct proc *p)
static inline void mtresume(struct proc *p)
{
int i;
......@@ -55,3 +54,9 @@ static inline void mtrace_kstack_resume(struct proc *p)
panic("mtrace_kstack_resume: bad stack");
mtrace_fcall_register(p->pid, 0, p->mtrace_stacks.tag[i], i, mtrace_resume);
}
#else
// MTRACE disabled: compile the kernel-stack tracing hooks down to no-ops
// so call sites need no #ifdefs.  The do { } while (0) form keeps each
// macro a single statement, safe inside unbraced if/else bodies.
#define mtstart(ip, p) do { } while (0)
#define mtstop(p) do { } while (0)
#define mtpause(p) do { } while (0)
#define mtresume(p) do { } while (0)
#endif
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#include "amd64.h"
#include "uspinlock.h"
......
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#include "amd64.h"
#include "uspinlock.h"
......
#if MTRACE
// Freestanding kernel build: provide the fixed-width typedefs that
// mtrace-magic.h expects, since there is no hosted <stdint.h> here.
typedef __signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;
typedef unsigned short uint16_t;
typedef int int32_t;
typedef unsigned int uint32_t;
typedef long long int64_t;
typedef unsigned long long uint64_t;
typedef __PTRDIFF_TYPE__ intptr_t;
typedef unsigned __PTRDIFF_TYPE__ uintptr_t;
// Prototypes mtrace-magic.h relies on; implemented elsewhere in the kernel.
void* memcpy(void *dst, const void *src, u32 n);
char* strncpy(char *s, const char *t, int n);
// Address the current function will return to; passed to mtrace so traces
// record the call site rather than this wrapper.
#define RET_IP() ((unsigned long)__builtin_return_address(0))
// NOTE(review): both <mtrace-magic.h> (below) and "mtrace-magic.h" (further
// down) appear included — likely a diff artifact; confirm only one survives.
#include <mtrace-magic.h>
// Tell mtrace about memory allocation
#define mtlabel(type, addr, bytes, str, n) \
mtrace_label_register(type, addr, bytes, str, n, RET_IP())
#define mtunlabel(type, addr) \
mtrace_label_register(type, addr, 0, NULL, 0, RET_IP())
// Tell mtrace about locking
#define mtlock(ptr) \
mtrace_lock_register(RET_IP(), ptr, lockname(ptr), mtrace_lockop_acquire, 0)
#define mtacquired(ptr) \
mtrace_lock_register(RET_IP(), ptr, lockname(ptr), mtrace_lockop_acquired, 0)
#define mtunlock(ptr) \
mtrace_lock_register(RET_IP(), ptr, lockname(ptr), mtrace_lockop_release, 0)
// Tell mtrace to start/stop recording call and ret
#define mtrec(cpu) mtrace_call_set(1, cpu)
#define mtign(cpu) mtrace_call_set(0, cpu)
#include "mtrace-magic.h"
#else
// MTRACE disabled: every tracing macro becomes a no-op statement so call
// sites compile unchanged with zero runtime cost.
#define mtlabel(type, addr, bytes, str, n) do { } while (0)
#define mtunlabel(type, addr) do { } while (0)
#define mtlock(ptr) do { } while (0)
#define mtacquired(ptr) do { } while (0)
#define mtunlock(ptr) do { } while (0)
#define mtrec(cpu) do { } while (0)
#define mtign(cpu) do { } while (0)
#endif
......@@ -9,16 +9,18 @@
#include "proc.h"
#include "cpu.h"
#include "bits.h"
#include "xv6-mtrace.h"
#include "kmtrace.h"
#include "vm.h"
extern void trapret(void);
int __mpalign__ idle[NCPU];
struct ns *nspid __mpalign__;
struct ns *nsrunq __mpalign__;
static struct proc *bootproc __mpalign__;
#if MTRACE
struct kstack_tag kstack_tag[NCPU];
#endif
enum { sched_debug = 0 };
void
......@@ -39,10 +41,10 @@ sched(void)
intena = mycpu()->intena;
myproc()->curcycles += rdtsc() - myproc()->tsc;
if (myproc()->state == ZOMBIE)
mtrace_kstack_stop(myproc());
mtstop(myproc());
else
mtrace_kstack_pause(myproc());
mtrace_call_set(0, mycpu()->id);
mtpause(myproc());
mtign(mycpu()->id);
swtch(&myproc()->context, mycpu()->scheduler);
mycpu()->intena = intena;
......@@ -141,9 +143,9 @@ forkret(void)
// b/c file system code needs a process context
// in which to call cv_sleep().
if(myproc()->cwd == 0) {
mtrace_kstack_start(forkret, myproc());
mtstart(forkret, myproc());
myproc()->cwd = namei("/");
mtrace_kstack_stop(myproc());
mtstop(myproc());
}
// Return to "caller", actually trapret (see allocproc).
......@@ -209,6 +211,7 @@ exit(void)
static struct proc*
allocproc(void)
{
extern void trapret(void);
struct proc *p;
char *sp;
......@@ -394,7 +397,7 @@ scheduler(void)
// Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
// mtrace_call_set(1, cpu->id);
mtrace_kstack_start(scheduler, schedp);
mtstart(scheduler, schedp);
for(;;){
// Enable interrupts on this processor.
......@@ -417,17 +420,17 @@ scheduler(void)
p->state = RUNNING;
p->tsc = rdtsc();
mtrace_kstack_pause(schedp);
mtpause(schedp);
if (p->context->rip != (uptr)forkret &&
p->context->rip != (uptr)rcu_gc_worker)
{
mtrace_kstack_resume(proc);
mtresume(p);
}
mtrace_call_set(1, mycpu()->id);
mtrec(mycpu()->id);
swtch(&mycpu()->scheduler, myproc()->context);
mtrace_kstack_resume(schedp);
mtrace_call_set(0, mycpu()->id);
mtresume(schedp);
mtign(mycpu()->id);
switchkvm();
// Process is done running for now.
......
......@@ -8,7 +8,7 @@
#include "queue.h"
#include "proc.h"
#include "cpu.h"
#include "xv6-mtrace.h"
#include "kmtrace.h"
struct rcu {
unsigned long epoch;
......@@ -107,7 +107,7 @@ rcu_gc_worker(void)
{
release(&myproc()->lock); // initially held by scheduler
mtrace_kstack_start(rcu_gc_worker, myproc());
mtstart(rcu_gc_worker, myproc());
struct spinlock wl;
initlock(&wl, "rcu_gc_worker"); // dummy lock
......
......@@ -7,7 +7,7 @@
#include "cpu.h"
#include "bits.h"
#include "spinlock.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
void
initlock(struct spinlock *lk, char *name)
......@@ -35,31 +35,13 @@ acquire(struct spinlock *lk)
}
#endif
mtrace_lock_register(RET_EIP(),
lk,
#if SPINLOCK_DEBUG
lk->name ?: "null",
#else
"unknown",
#endif
mtrace_lockop_acquire,
0);
mtlock(lk);
// The xchg is atomic.
// It also serializes, so that reads after acquire are not
// reordered before it.
while(xchg32(&lk->locked, 1) != 0)
;
mtrace_lock_register(RET_EIP(),
lk,
#if SPINLOCK_DEBUG
lk->name ?: "null",
#else
"unknown",
#endif
mtrace_lockop_acquired,
0);
mtacquired(lk);
#if SPINLOCK_DEBUG
// Record info about lock acquisition for debugging.
......@@ -79,15 +61,7 @@ release(struct spinlock *lk)
}
#endif
mtrace_lock_register(RET_EIP(),
lk,
#if SPINLOCK_DEBUG
lk->name ?: "null",
#else
"unknown",
#endif
mtrace_lockop_release,
0);
mtunlock(lk);
#if SPINLOCK_DEBUG
lk->pcs[0] = 0;
......
......@@ -15,3 +15,11 @@ struct spinlock {
#endif
};
// Human-readable name for a spinlock, used when reporting lock events to
// mtrace.  Never returns NULL.
static inline const char *lockname(struct spinlock *s)
{
#if SPINLOCK_DEBUG
  // Debug builds store a name in the lock; substitute "null" if unset.
  if (s->name)
    return s->name;
  return "null";
#else
  // Non-debug builds carry no name field.
  return "unknown";
#endif
}
......@@ -8,8 +8,8 @@
#include "proc.h"
#include "amd64.h"
#include "syscall.h"
#include "xv6-mtrace.h"
#include "cpu.h"
#include "kmtrace.h"
// User code makes a system call with INT T_SYSCALL.
// System call number in %eax.
......@@ -169,11 +169,11 @@ syscall(void)
num = myproc()->tf->rax;
if(num >= 0 && num < NELEM(syscalls) && syscalls[num]) {
mtrace_kstack_start(syscalls[num], proc);
mtrace_call_set(1, cpunum());
mtstart(syscalls[num], myproc());
mtrec(cpunum());
myproc()->tf->rax = syscalls[num]();
mtrace_kstack_stop(myproc());
mtrace_call_set(0, cpunum());
mtstop(myproc());
mtign(cpunum());
} else {
cprintf("%d %s: unknown sys call %d\n",
myproc()->pid, myproc()->name, num);
......
#include "types.h"
#include "stat.h"
#include "user.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
#include "amd64.h"
#include "uspinlock.h"
......
......@@ -9,7 +9,7 @@
#include "spinlock.h"
#include "condvar.h"
#include "proc.h"
#include "xv6-mtrace.h"
#include "kmtrace.h"
#include "bits.h"
u64 ticks __mpalign__;
......@@ -51,13 +51,13 @@ trap(struct trapframe *tf)
// XXX(sbw) sysenter/sysexit
if(tf->trapno == T_SYSCALL){
if(myproc()->killed) {
mtrace_kstack_start(trap, proc);
mtstart(trap, myproc());
exit();
}
myproc()->tf = tf;
syscall();
if(myproc()->killed) {
mtrace_kstack_start(trap, myproc());
mtstart(trap, myproc());
exit();
}
return;
......@@ -65,8 +65,8 @@ trap(struct trapframe *tf)
#if MTRACE
if (myproc()->mtrace_stacks.curr >= 0)
mtrace_kstack_pause(myproc());
mtrace_kstack_start(trap, myproc());
mtpause(myproc());
mtstart(trap, myproc());
#endif
switch(tf->trapno){
......@@ -119,9 +119,9 @@ trap(struct trapframe *tf)
if(tf->trapno == T_PGFLT){
if(pagefault(myproc()->vmap, rcr2(), tf->err) >= 0){
#if MTRACE
mtrace_kstack_stop(myproc());
mtstop(myproc());
if (myproc()->mtrace_stacks.curr >= 0)
mtrace_kstack_resume(myproc());
mtresume(myproc());
#endif
return;
}
......@@ -151,9 +151,9 @@ trap(struct trapframe *tf)
exit();
#if MTRACE
mtrace_kstack_stop(myproc());
mtstop(myproc());
if (myproc()->mtrace_stacks.curr >= 0)
mtrace_kstack_resume(myproc());
mtresume(myproc());
#endif
}
......
......@@ -5,7 +5,7 @@
#include "fcntl.h"
#include "syscall.h"
#include "traps.h"
#include "xv6-mtrace.h"
#include "mtrace.h"
char buf[2048];
char name[3];
......
#if MTRACE
// Freestanding kernel build: provide the fixed-width typedefs that
// mtrace-magic.h expects, since there is no hosted <stdint.h> here.
typedef __signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;
typedef unsigned short uint16_t;
typedef int int32_t;
typedef unsigned int uint32_t;
typedef long long int64_t;
typedef unsigned long long uint64_t;
typedef __PTRDIFF_TYPE__ intptr_t;
typedef unsigned __PTRDIFF_TYPE__ uintptr_t;
// Prototypes mtrace-magic.h relies on; implemented elsewhere in the kernel.
void* memcpy(void *dst, const void *src, u32 n);
char* strncpy(char *s, const char *t, int n);
// Address the current function will return to; lets mtrace attribute an
// event to its call site rather than to the tracing wrapper.
#define RET_EIP() ((unsigned long)__builtin_return_address(0))
#include "mtrace-magic.h"
#else
// MTRACE disabled: every tracing call becomes a no-op statement so call
// sites compile unchanged with zero runtime cost.
#define mtrace_lock_register(ip, x, name, op, y) do { } while (0)
#define mtrace_label_register(t, r, x, y, z, ip) do { } while (0)
#define mtrace_kstack_start(x, y) do { } while (0)
#define mtrace_kstack_stop(x) do { } while (0)
#define mtrace_kstack_pause(x) do { } while (0)
#define mtrace_kstack_resume(x) do { } while (0)
#define mtrace_call_set(x, y) do { } while (0)
#define RET_EIP() 0
#endif
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论