Commit fef8b31e authored by Nickolai Zeldovich

Merge branch 'scale-amd64' of git+ssh://pdos.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

@@ -191,6 +191,14 @@ rrsp(void)
   return val;
 }
 
+static inline u64
+rrbp(void)
+{
+  u64 val;
+  __asm volatile("movq %%rbp,%0" : "=r" (val));
+  return val;
+}
+
 static inline void
 lcr4(u64 val)
 {
...
@@ -97,6 +97,7 @@ bget(u32 dev, u64 sector, int *writer)
       victim->flags |= B_BUSY;
       ns_remove(bufns, KII(victim->dev, victim->sector), victim);
       release(&victim->lock);
+      destroylock(&victim->lock);
       gc_delayed(victim, kmfree);
 
       b = kmalloc(sizeof(*b));
@@ -105,10 +106,11 @@ bget(u32 dev, u64 sector, int *writer)
       b->flags = B_BUSY;
       *writer = 1;
       snprintf(b->lockname, sizeof(b->lockname), "cv:buf:%d", b->sector);
-      initlock(&b->lock, b->lockname+3);
+      initlock(&b->lock, b->lockname+3, LOCKSTAT_BIO);
       initcondvar(&b->cv, b->lockname);
       gc_begin_epoch();
       if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0) {
+        destroylock(&b->lock);
         gc_delayed(b, kmfree);
         goto loop;
       }
@@ -168,7 +170,7 @@ initbio(void)
     b->dev = 0xdeadbeef;
     b->sector = -i;  /* dummy to pre-allocate NBUF spaces for evict */
     b->flags = 0;
-    initlock(&b->lock, "bcache-lock");
+    initlock(&b->lock, "bcache-lock", LOCKSTAT_BIO);
     initcondvar(&b->cv, "bcache-cv");
     if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0)
       panic("binit ns_insert");
...
@@ -136,6 +136,6 @@ cv_wakeup(struct condvar *cv)
 void
 initcondvar(struct condvar *cv, const char *n)
 {
-  initlock(&cv->lock, n);
+  initlock(&cv->lock, n, LOCKSTAT_CONDVAR);
   LIST_INIT(&cv->waiters);
 }
@@ -135,26 +135,13 @@ puts(const char *s)
 }
 
 static inline void
-stacktrace(void)
+printtrace(u64 rbp)
 {
-#define PRINT_RET(i) \
-  do { \
-    uptr addr = (uptr) __builtin_return_address(i); \
-    if ((addr & KBASE) == KBASE) \
-      __cprintf(" %lx\n", addr); \
-    else \
-      return; \
-  } while (0)
-
-  PRINT_RET(0);
-  PRINT_RET(1);
-  PRINT_RET(2);
-  PRINT_RET(3);
-  PRINT_RET(4);
-  PRINT_RET(5);
-  PRINT_RET(6);
-#undef PRINT_RET
+  uptr pc[10];
+
+  getcallerpcs((void*)rbp, pc, NELEM(pc));
+  for (int i = 0; i < NELEM(pc) && pc[i] != 0; i++)
+    __cprintf(" %p\n", pc[i]);
 }
 
 void __noret__
@@ -164,8 +151,6 @@ kerneltrap(struct trapframe *tf)
   const char *name = "(no name)";
   void *kstack = NULL;
   int pid = 0;
-  uptr pc[10];
-  int i;
 
   cli();
   acquire(&cons.lock);
@@ -182,9 +167,7 @@ kerneltrap(struct trapframe *tf)
             tf->trapno, mycpu()->id,
             tf->rip, tf->rsp, rcr2(),
             name, pid, kstack);
-  getcallerpcs((void*)tf->rbp, pc, NELEM(pc));
-  for (i = 0; i < NELEM(pc) && pc[i] != 0; i++)
-    __cprintf(" %p\n", pc[i]);
+  printtrace(tf->rbp);
 
   panicked = 1;
   sys_halt();
@@ -206,7 +189,7 @@ panic(const char *fmt, ...)
   vprintfmt(writecons, 0, fmt, ap);
   va_end(ap);
   __cprintf("\n");
-  stacktrace();
+  printtrace(rrbp());
 
   panicked = 1;
   sys_halt();
@@ -337,7 +320,7 @@ consoleread(struct inode *ip, char *dst, u32 off, u32 n)
 void
 initconsole(void)
 {
-  initlock(&cons.lock, "console");
+  initlock(&cons.lock, "console", LOCKSTAT_CONSOLE);
   cons.locking = 1;
 
   devsw[CONSOLE].write = consolewrite;
...
@@ -62,7 +62,7 @@ crange_alloc(int nlevel)
   cr->crange_head.size = 0;
   assert(kmalign((void **) &cr->crange_head.lock,
                  CACHELINE, sizeof(struct spinlock)) == 0);
-  initlock(cr->crange_head.lock, "head lock");
+  initlock(cr->crange_head.lock, "head lock", LOCKSTAT_CRANGE);
   cr->crange_head.next = kmalloc(sizeof(cr->crange_head.next[0]) * nlevel);
   for (int l = 0; l < nlevel; l++) cr->crange_head.next[l] = 0;
   if (crange_debug) cprintf("crange_alloc: return 0x%lx\n", (u64) cr);
@@ -82,6 +82,7 @@ crange_free(struct crange *cr)
     clist_range_free(e);
   }
   kmfree(cr->crange_head.next);
+  destroylock(cr->crange_head.lock);
   kmalignfree(cr->crange_head.lock);
   kmalignfree(cr);
 }
@@ -176,6 +177,7 @@ clist_range_free(void *p)
   for (int l = 0; l < e->nlevel; l++) {
     e->next[l] = (struct clist_range *) 0xDEADBEEF;
   }
+  destroylock(e->lock);
   kmalignfree(e->lock);
   kmfree(e->next);
   kmalignfree(e);
@@ -210,7 +212,7 @@ crange_new(struct crange *cr, u64 k, u64 sz, void *v, struct clist_range *n)
   for (int l = 1; l < r->nlevel; l++) r->next[l] = 0;
   assert(kmalign((void **) &r->lock, CACHELINE,
                  sizeof(struct spinlock)) == 0);
-  initlock(r->lock, "crange");
+  initlock(r->lock, "crange", LOCKSTAT_CRANGE);
   r->cr = cr;
   return r;
 }
...
@@ -275,7 +275,7 @@ e1000attach(struct pci_func *pcif)
   pci_func_enable(pcif);
 
-  initlock(&e1000.lk, "e1000");
+  initlock(&e1000.lk, "e1000", 1);
   e1000.membase = pcif->reg_base[0];
   e1000.iobase = pcif->reg_base[2];
   e1000.pcidevid = pcif->dev_id;
...
@@ -234,6 +234,7 @@ ifree(void *arg)
     ip->dir = 0;
   }
 
+  destroylock(&ip->lock);
   kmfree(ip);
 }
@@ -295,10 +296,11 @@ iget(u32 dev, u32 inum)
     ip->flags = I_BUSYR | I_BUSYW;
     ip->readbusy = 1;
     snprintf(ip->lockname, sizeof(ip->lockname), "cv:ino:%d", ip->inum);
-    initlock(&ip->lock, ip->lockname+3);
+    initlock(&ip->lock, ip->lockname+3, LOCKSTAT_FS);
     initcondvar(&ip->cv, ip->lockname);
     ip->dir = 0;
     if (ns_insert(ins, KII(ip->dev, ip->inum), ip) < 0) {
+      destroylock(&ip->lock);
       gc_delayed(ip, kmfree);
       goto retry;
     }
...
@@ -277,7 +277,7 @@ gc_worker(void *x)
   if (VERBOSE)
     cprintf("gc_worker: %d\n", mycpu()->id);
 
-  initlock(&wl, "rcu_gc_worker dummy");   // dummy lock
+  initlock(&wl, "rcu_gc_worker dummy", LOCKSTAT_GC);   // dummy lock
   for (;;) {
     u64 i;
     acquire(&wl);
@@ -305,13 +305,13 @@ initprocgc(struct proc *p)
 {
   p->epoch = global_epoch;
   p->epoch_depth = 0;
-  initlock(&p->gc_epoch_lock, "per process gc_lock");
+  initlock(&p->gc_epoch_lock, "per process gc_lock", 0);
 }
 
 void
 initgc(void)
 {
-  initlock(&gc_lock.l, "gc");
+  initlock(&gc_lock.l, "gc", LOCKSTAT_GC);
   global_epoch = NEPOCH-2;
   for (int i = 0; i < ncpu; i++) {
...
@@ -137,7 +137,7 @@ kfree_pool(struct kmem *m, char *v)
     panic("kfree_pool: unknown region %p", v);
 
   // Fill with junk to catch dangling refs.
-  if (kinited && kalloc_memset && m->size <= 16384)
+  if (kalloc_memset && kinited && m->size <= 16384)
     memset(v, 1, m->size);
 
   acquire(&m->lock);
@@ -239,7 +239,7 @@ initkalloc(u64 mbaddr)
   for (int c = 0; c < NCPU; c++) {
     kmems[c].name[0] = (char) c + '0';
     safestrcpy(kmems[c].name+1, "kmem", MAXNAME-1);
-    initlock(&kmems[c].lock, kmems[c].name);
+    initlock(&kmems[c].lock, kmems[c].name, LOCKSTAT_KALLOC);
     kmems[c].size = PGSIZE;
   }
@@ -247,7 +247,7 @@ initkalloc(u64 mbaddr)
     for (int c = 0; c < NCPU; c++) {
       slabmem[i][c].name[0] = (char) c + '0';
       initlock(&slabmem[i][c].lock,
-               slabmem[i][c].name);
+               slabmem[i][c].name, LOCKSTAT_KALLOC);
     }
   }
@@ -281,6 +281,7 @@ initkalloc(u64 mbaddr)
 void
 kfree(void *v)
 {
+  verifyfree(v, mykmem()->size);
   kfree_pool(mykmem(), v);
 }
@@ -289,3 +290,22 @@ ksfree(slab_t slab, void *v)
 {
   kfree_pool(slabmem[slab], v);
 }
+
+void
+verifyfree(char *ptr, u64 nbytes)
+{
+#if VERIFYFREE
+  char *p = ptr;
+  char *e = p + nbytes;
+  for (; p < e; p++) {
+    // Search for pointers in the ptr region
+    u64 x = *(uptr *)p;
+    if (KBASE < x && x < KBASE+(128ull<<30)) {
+      struct klockstat *kls = (struct klockstat *) x;
+      if (kls->magic == LOCKSTAT_MAGIC)
+        panic("verifyunmarked: LOCKSTAT_MAGIC %p(%lu):%p->%p",
+              ptr, nbytes, p, kls);
+    }
+  }
+#endif
+}
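
Note: verifyfree() above scans a freed region for values that look like kernel pointers and panics if any of them still reaches a klockstat whose magic field equals LOCKSTAT_MAGIC, i.e. a lock that was initialized with statistics enabled but never passed to destroylock() before its containing object was freed (the check is compiled in only when VERIFYFREE/LOCKSTAT are on). A minimal sketch of the bug it is meant to catch; struct foo and foo_free are illustrative names, not part of this commit:

struct foo {
  struct spinlock lock;   // lock.stat points at a kmalloc'd klockstat when lockstat is enabled
  char buf[64];
};

static void
foo_free(struct foo *f)
{
  // Without this call, f->lock.stat would still point at a klockstat with
  // magic == LOCKSTAT_MAGIC, and verifyfree() in kfree()/kmfree() would panic.
  destroylock(&f->lock);
  kmfree(f);
}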
@@ -154,7 +154,7 @@ void* kmalloc(u64);
 void kmfree(void*);
 int  kmalign(void **p, int align, u64 size);
 void kmalignfree(void *);
+void verifyfree(char *ptr, u64 nbytes);
 
 // kbd.c
 void kbdintr(void);
@@ -268,10 +268,9 @@ void sampconf(void);
 void            acquire(struct spinlock*);
 int             tryacquire(struct spinlock*);
 int             holding(struct spinlock*);
-void            initlock(struct spinlock*, const char*);
+void            initlock(struct spinlock*, const char*, int);
+void            destroylock(struct spinlock *lk);
 void            release(struct spinlock*);
-void            lockstat_init(struct spinlock *lk);
-void            lockstat_stop(struct spinlock *lk);
 
 // syscall.c
 int             argint64(int, u64*);
...
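
For reference, the calling convention implied by the new prototypes: initlock() now takes a third argument saying whether to collect lock statistics for this lock (call sites pass a LOCKSTAT_* knob from lockstat.h or a literal 0/1), and a lock must be torn down with destroylock() before the memory holding it is freed. A hedged sketch mirroring the pipe.c/fs.c changes in this commit; obj, obj_alloc and obj_free are illustrative:

struct obj {
  struct spinlock lock;
};

static struct obj *
obj_alloc(void)
{
  struct obj *o = kmalloc(sizeof(*o));
  if (o == NULL)
    return NULL;
  initlock(&o->lock, "obj", 0);   // 0: no lockstat; pass 1 or a LOCKSTAT_* knob to gather stats
  return o;
}

static void
obj_free(struct obj *o)
{
  destroylock(&o->lock);          // retires the lock's lockstat state before the memory is reused
  kmfree(o);
}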
@@ -32,7 +32,7 @@ kminit(void)
   for (int c = 0; c < NCPU; c++) {
     freelists[c].name[0] = (char) c + '0';
     safestrcpy(freelists[c].name+1, "freelist", MAXNAME-1);
-    initlock(&freelists[c].lock, freelists[c].name);
+    initlock(&freelists[c].lock, freelists[c].name, LOCKSTAT_KMALLOC);
   }
 }
@@ -102,6 +102,7 @@ kmfree(void *ap)
   b = (long) h->next;
   if(b < 0 || b > KMMAX)
     panic("kmfree bad bucket");
+  verifyfree(ap, (1 << b) - sizeof(struct header));
   h->next = freelists[c].buckets[b];
   freelists[c].buckets[b] = h;
...
#include "queue.h" #include "queue.h"
#define LOCKSTAT_MAGIC 0xb4cd79c1b2e46f40ull
struct cpulockstat { struct cpulockstat {
u64 acquires; u64 acquires;
u64 contends; u64 contends;
...@@ -17,7 +19,7 @@ struct lockstat { ...@@ -17,7 +19,7 @@ struct lockstat {
}; };
struct klockstat { struct klockstat {
u8 active; u64 magic;
LIST_ENTRY(klockstat) link; LIST_ENTRY(klockstat) link;
struct lockstat s; struct lockstat s;
}; };
...@@ -25,3 +27,18 @@ struct klockstat { ...@@ -25,3 +27,18 @@ struct klockstat {
#define LOCKSTAT_START 1 #define LOCKSTAT_START 1
#define LOCKSTAT_STOP 2 #define LOCKSTAT_STOP 2
#define LOCKSTAT_CLEAR 3 #define LOCKSTAT_CLEAR 3
// Debug knobs
#define LOCKSTAT_BIO 0
#define LOCKSTAT_CONDVAR 0
#define LOCKSTAT_CONSOLE 1
#define LOCKSTAT_CRANGE 1
#define LOCKSTAT_FS 1
#define LOCKSTAT_GC 1
#define LOCKSTAT_KALLOC 1
#define LOCKSTAT_KMALLOC 1
#define LOCKSTAT_NET 1
#define LOCKSTAT_PIPE 1
#define LOCKSTAT_PROC 1
#define LOCKSTAT_SCHED 1
#define LOCKSTAT_VM 1
@@ -99,7 +99,7 @@ start_timer(struct timer_thread *t, void (*func)(void),
   t->nsec = 1000000000 / 1000*msec;
   t->func = func;
   initcondvar(&t->waitcv, name);
-  initlock(&t->waitlk, name);
+  initlock(&t->waitlk, name, LOCKSTAT_NET);
   p = threadalloc(net_timer, t);
   if (p == NULL)
     panic("net: start_timer");
@@ -406,7 +406,7 @@ initnet(void)
 void
 netrx(void *va, u16 len)
 {
-  cprintf("netrx: %u\n", len);
+  netfree(va);
 }
 
 long
...
@@ -257,5 +257,5 @@ lwip_core_sleep(struct condvar *c)
 void
 lwip_core_init(void)
 {
-  initlock(&lwprot.lk, "lwIP lwprot");
+  initlock(&lwprot.lk, "lwIP lwprot", 1);
 }
@@ -18,6 +18,7 @@
 #define VERBOSE        0  // print kernel diagnostics
 #define SPINLOCK_DEBUG 1  // Debug spin locks
 #define LOCKSTAT       0
+#define VERIFYFREE     LOCKSTAT
 #if defined(HW_josmp)
 #define NCPU       16   // maximum number of CPUs
 #define MTRACE     0
...
@@ -36,7 +36,7 @@ pipealloc(struct file **f0, struct file **f1)
   p->writeopen = 1;
   p->nwrite = 0;
   p->nread = 0;
-  initlock(&p->lock, "pipe");
+  initlock(&p->lock, "pipe", LOCKSTAT_PIPE);
   initcondvar(&p->cv, "pipe");
   (*f0)->type = FD_PIPE;
   (*f0)->readable = 1;
@@ -50,8 +50,10 @@ pipealloc(struct file **f0, struct file **f1)
 //PAGEBREAK: 20
  bad:
-  if(p)
+  if(p) {
+    destroylock(&p->lock);
     kfree((char*)p);
+  }
   if(*f0)
     fileclose(*f0);
   if(*f1)
@@ -71,6 +73,7 @@ pipeclose(struct pipe *p, int writable)
   cv_wakeup(&p->cv);
   if(p->readopen == 0 && p->writeopen == 0){
     release(&p->lock);
+    destroylock(&p->lock);
     kfree((char*)p);
   } else
     release(&p->lock);
...
@@ -174,7 +174,7 @@ exit(void)
 static void
 freeproc(struct proc *p)
 {
-  lockstat_stop(&p->lock);
+  destroylock(&p->lock);
   gc_delayed(p, kmfree);
 }
@@ -204,8 +204,7 @@ allocproc(void)
 #endif
 
   snprintf(p->lockname, sizeof(p->lockname), "cv:proc:%d", p->pid);
-  initlock(&p->lock, p->lockname+3);
-  lockstat_init(&p->lock);
+  initlock(&p->lock, p->lockname+3, LOCKSTAT_PROC);
   initcondvar(&p->cv, p->lockname);
 
   initwqframe(&p->wqframe);
...
@@ -126,7 +126,7 @@ initsched(void)
   int i;
 
   for (i = 0; i < NCPU; i++) {
-    initlock(&runq[i].lock, "runq");
+    initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
     STAILQ_INIT(&runq[i].q);
   }
 }
@@ -97,72 +97,7 @@ holding(struct spinlock *lock)
 }
 #endif
 
-void
-initlock(struct spinlock *lk, const char *name)
-{
-#if SPINLOCK_DEBUG
-  lk->name = name;
-  lk->cpu = 0;
-#endif
 #if LOCKSTAT
-  lk->stat = NULL;
-#endif
-  lk->locked = 0;
-}
-
-int
-tryacquire(struct spinlock *lk)
-{
-  pushcli();
-  locking(lk);
-  if (xchg32(&lk->locked, 1) != 0) {
-    popcli();
-    return 0;
-  }
-  locked(lk, 0);
-  return 1;
-}
-
-// Acquire the lock.
-// Loops (spins) until the lock is acquired.
-// Holding a lock for a long time may cause
-// other CPUs to waste time spinning to acquire it.
-void
-acquire(struct spinlock *lk)
-{
-  u64 retries;
-
-  pushcli();
-  locking(lk);
-
-  retries = 0;
-  while(xchg32(&lk->locked, 1) != 0)
-    retries++;
-  locked(lk, retries);
-}
-
-// Release the lock.
-void
-release(struct spinlock *lk)
-{
-  releasing(lk);
-
-  // The xchg serializes, so that reads before release are
-  // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
-  // 7.2) says reads can be carried out speculatively and in
-  // any order, which implies we need to serialize here.
-  // But the 2007 Intel 64 Architecture Memory Ordering White
-  // Paper says that Intel 64 and IA-32 will not move a load
-  // after a store.  So lock->locked = 0 would work here.
-  // The xchg being asm volatile ensures gcc emits it after
-  // the above assignments (and after the critical section).
-  xchg32(&lk->locked, 0);
-  popcli();
-}
-
-#if LOCKSTAT
 LIST_HEAD(lockstat_list, klockstat);
 static struct lockstat_list lockstat_list = LIST_HEAD_INITIALIZER(lockstat_list);
 static struct spinlock lockstat_lock = {
@@ -176,15 +111,13 @@ static struct spinlock lockstat_lock = {
 void
 lockstat_init(struct spinlock *lk)
 {
-  if (lk->stat != NULL)
-    panic("initlockstat");
   lk->stat = kmalloc(sizeof(*lk->stat));
   if (lk->stat == NULL)
     return;
 
   memset(lk->stat, 0, sizeof(*lk->stat));
-  lk->stat->active = 1;
+  lk->stat->magic = LOCKSTAT_MAGIC;
   safestrcpy(lk->stat->s.name, lk->name, sizeof(lk->stat->s.name));
 
   acquire(&lockstat_lock);
@@ -192,13 +125,11 @@ lockstat_init(struct spinlock *lk)
   release(&lockstat_lock);
 }
 
-void
+static void
 lockstat_stop(struct spinlock *lk)
 {
-  if (lk->stat != NULL) {
-    lk->stat->active = 0;
-    lk->stat = NULL;
-  }
+  if (lk->stat != NULL)
+    lk->stat->magic = 0;
 }
 
 void
@@ -208,9 +139,11 @@ lockstat_clear(void)
   acquire(&lockstat_lock);
   LIST_FOREACH_SAFE(stat, &lockstat_list, link, tmp) {
-    if (stat->active == 0) {
+    if (stat->magic == 0) {
       LIST_REMOVE(stat, link);
-      kmfree(stat);
+      // So verifyfree doesn't follow le_next
+      stat->link.le_next = NULL;
+      gc_delayed(stat, kmfree);
     } else {
       memset(&stat->s.cpu, 0, sizeof(stat->s.cpu));
     }
@@ -273,21 +206,83 @@ initlockstat(void)
   devsw[DEVLOCKSTAT].write = lockstat_write;
   devsw[DEVLOCKSTAT].read = lockstat_read;
 }
 
 #else
 
+void
+initlockstat(void)
+{
+}
+#endif
+
 void
-lockstat_init(struct spinlock *lk)
+initlock(struct spinlock *lk, const char *name, int lockstat)
 {
+#if SPINLOCK_DEBUG
+  lk->name = name;
+  lk->cpu = 0;
+#endif
+#if LOCKSTAT
+  lk->stat = NULL;
+  if (lockstat)
+    lockstat_init(lk);
+#endif
+  lk->locked = 0;
 }
 
 void
-lockstat_stop(struct spinlock *lk)
+destroylock(struct spinlock *lk)
 {
+#if LOCKSTAT
+  lockstat_stop(lk);
+#endif
 }
 
+int
+tryacquire(struct spinlock *lk)
+{
+  pushcli();
+  locking(lk);
+  if (xchg32(&lk->locked, 1) != 0) {
+    popcli();
+    return 0;
+  }
+  locked(lk, 0);
+  return 1;
+}
+
+// Acquire the lock.
+// Loops (spins) until the lock is acquired.
+// Holding a lock for a long time may cause
+// other CPUs to waste time spinning to acquire it.
 void
-initlockstat(void)
+acquire(struct spinlock *lk)
 {
+  u64 retries;
+
+  pushcli();
+  locking(lk);
+
+  retries = 0;
+  while(xchg32(&lk->locked, 1) != 0)
+    retries++;
+  locked(lk, retries);
+}
+
+// Release the lock.
+void
+release(struct spinlock *lk)
+{
+  releasing(lk);
+
+  // The xchg serializes, so that reads before release are
+  // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
+  // 7.2) says reads can be carried out speculatively and in
+  // any order, which implies we need to serialize here.
+  // But the 2007 Intel 64 Architecture Memory Ordering White
+  // Paper says that Intel 64 and IA-32 will not move a load
+  // after a store.  So lock->locked = 0 would work here.
+  // The xchg being asm volatile ensures gcc emits it after
+  // the above assignments (and after the critical section).
+  xchg32(&lk->locked, 0);
+  popcli();
 }
-#endif
@@ -24,7 +24,7 @@ vma_alloc(void)
   memset(e, 0, sizeof(struct vma));
   e->va_type = PRIVATE;
   snprintf(e->lockname, sizeof(e->lockname), "vma:%p", e);
-  initlock(&e->lock, e->lockname);
+  initlock(&e->lock, e->lockname, LOCKSTAT_VM);
   return e;
 }
@@ -41,6 +41,7 @@ vma_free(void *p)
   struct vma *e = (struct vma *) p;
   if(e->n)
     vmn_decref(e->n);
+  destroylock(&e->lock);
   kmfree(e);
 }
@@ -107,11 +108,12 @@ vmap_alloc(void)
     return 0;
   memset(m, 0, sizeof(struct vmap));
   snprintf(m->lockname, sizeof(m->lockname), "vmap:%p", m);
-  initlock(&m->lock, m->lockname);
+  initlock(&m->lock, m->lockname, LOCKSTAT_VM);
   m->ref = 1;
   m->pml4 = setupkvm();
   if (m->pml4 == 0) {
     cprintf("vmap_alloc: setupkvm out of memory\n");
+    destroylock(&m->lock);
     kmfree(m);
     return 0;
   }
@@ -331,6 +333,8 @@ vmap_free(void *p)
   freevm(m->pml4);
   m->pml4 = 0;
   m->alloc = 0;
+  destroylock(&m->lock);
+  kmfree(m);
 }
 
 // Does any vma overlap start..start+len?
...