Add an "enable lockstat" flag to initlock.

You must call destroylock before freeing the memory holding a lockstat-enabled lock. I did this for most of the locks.
Parent 2c179184
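The pattern applied throughout the diff is the same: pass a LOCKSTAT_* knob (or a literal 0/1) as the new third argument of initlock, and call destroylock before the memory containing the lock goes back to kmfree/kfree. A minimal sketch of that pattern, using a hypothetical struct foo that is not part of this patch:

// Illustrative only -- struct foo and these helpers are not in the patch.
struct foo {
  struct spinlock lock;
};

struct foo *
foo_alloc(void)
{
  struct foo *f = kmalloc(sizeof(*f));
  if (f == NULL)
    return NULL;
  // Third argument enables lockstat for this lock (usually a LOCKSTAT_* knob).
  initlock(&f->lock, "foo", 1);
  return f;
}

void
foo_free(struct foo *f)
{
  // Release the klockstat record before the lock's memory is freed;
  // otherwise verifyfree can later find a live LOCKSTAT_MAGIC pointer
  // in the freed region and panic.
  destroylock(&f->lock);
  kmfree(f);
}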
@@ -97,6 +97,7 @@ bget(u32 dev, u64 sector, int *writer)
     victim->flags |= B_BUSY;
     ns_remove(bufns, KII(victim->dev, victim->sector), victim);
     release(&victim->lock);
+    destroylock(&victim->lock);
     gc_delayed(victim, kmfree);
     b = kmalloc(sizeof(*b));
@@ -105,10 +106,11 @@ bget(u32 dev, u64 sector, int *writer)
   b->flags = B_BUSY;
   *writer = 1;
   snprintf(b->lockname, sizeof(b->lockname), "cv:buf:%d", b->sector);
-  initlock(&b->lock, b->lockname+3);
+  initlock(&b->lock, b->lockname+3, LOCKSTAT_BIO);
   initcondvar(&b->cv, b->lockname);
   gc_begin_epoch();
   if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0) {
+    destroylock(&b->lock);
     gc_delayed(b, kmfree);
     goto loop;
   }
@@ -168,7 +170,7 @@ initbio(void)
     b->dev = 0xdeadbeef;
     b->sector = -i;  /* dummy to pre-allocate NBUF spaces for evict */
     b->flags = 0;
-    initlock(&b->lock, "bcache-lock");
+    initlock(&b->lock, "bcache-lock", LOCKSTAT_BIO);
     initcondvar(&b->cv, "bcache-cv");
     if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0)
       panic("binit ns_insert");
......
@@ -136,6 +136,6 @@ cv_wakeup(struct condvar *cv)
 void
 initcondvar(struct condvar *cv, const char *n)
 {
-  initlock(&cv->lock, n);
+  initlock(&cv->lock, n, LOCKSTAT_CONDVAR);
   LIST_INIT(&cv->waiters);
 }
@@ -320,7 +320,7 @@ consoleread(struct inode *ip, char *dst, u32 off, u32 n)
 void
 initconsole(void)
 {
-  initlock(&cons.lock, "console");
+  initlock(&cons.lock, "console", LOCKSTAT_CONSOLE);
   cons.locking = 1;
   devsw[CONSOLE].write = consolewrite;
......
@@ -62,7 +62,7 @@ crange_alloc(int nlevel)
   cr->crange_head.size = 0;
   assert(kmalign((void **) &cr->crange_head.lock,
                  CACHELINE, sizeof(struct spinlock)) == 0);
-  initlock(cr->crange_head.lock, "head lock");
+  initlock(cr->crange_head.lock, "head lock", LOCKSTAT_CRANGE);
   cr->crange_head.next = kmalloc(sizeof(cr->crange_head.next[0]) * nlevel);
   for (int l = 0; l < nlevel; l++) cr->crange_head.next[l] = 0;
   if (crange_debug) cprintf("crange_alloc: return 0x%lx\n", (u64) cr);
@@ -82,6 +82,7 @@ crange_free(struct crange *cr)
     clist_range_free(e);
   }
   kmfree(cr->crange_head.next);
+  destroylock(cr->crange_head.lock);
   kmalignfree(cr->crange_head.lock);
   kmalignfree(cr);
 }
@@ -176,6 +177,7 @@ clist_range_free(void *p)
   for (int l = 0; l < e->nlevel; l++) {
     e->next[l] = (struct clist_range *) 0xDEADBEEF;
   }
+  destroylock(e->lock);
   kmalignfree(e->lock);
   kmfree(e->next);
   kmalignfree(e);
@@ -210,7 +212,7 @@ crange_new(struct crange *cr, u64 k, u64 sz, void *v, struct clist_range *n)
   for (int l = 1; l < r->nlevel; l++) r->next[l] = 0;
   assert(kmalign((void **) &r->lock, CACHELINE,
                  sizeof(struct spinlock)) == 0);
-  initlock(r->lock, "crange");
+  initlock(r->lock, "crange", LOCKSTAT_CRANGE);
   r->cr = cr;
   return r;
 }
......
@@ -275,7 +275,7 @@ e1000attach(struct pci_func *pcif)
   pci_func_enable(pcif);
-  initlock(&e1000.lk, "e1000");
+  initlock(&e1000.lk, "e1000", 1);
   e1000.membase = pcif->reg_base[0];
   e1000.iobase = pcif->reg_base[2];
   e1000.pcidevid = pcif->dev_id;
......
@@ -234,6 +234,7 @@ ifree(void *arg)
     ip->dir = 0;
   }
+  destroylock(&ip->lock);
   kmfree(ip);
 }
@@ -295,10 +296,11 @@ iget(u32 dev, u32 inum)
   ip->flags = I_BUSYR | I_BUSYW;
   ip->readbusy = 1;
   snprintf(ip->lockname, sizeof(ip->lockname), "cv:ino:%d", ip->inum);
-  initlock(&ip->lock, ip->lockname+3);
+  initlock(&ip->lock, ip->lockname+3, LOCKSTAT_FS);
   initcondvar(&ip->cv, ip->lockname);
   ip->dir = 0;
   if (ns_insert(ins, KII(ip->dev, ip->inum), ip) < 0) {
+    destroylock(&ip->lock);
     gc_delayed(ip, kmfree);
     goto retry;
   }
......
@@ -277,7 +277,7 @@ gc_worker(void *x)
   if (VERBOSE)
     cprintf("gc_worker: %d\n", mycpu()->id);
-  initlock(&wl, "rcu_gc_worker dummy");  // dummy lock
+  initlock(&wl, "rcu_gc_worker dummy", LOCKSTAT_GC);  // dummy lock
   for (;;) {
     u64 i;
     acquire(&wl);
@@ -305,13 +305,13 @@ initprocgc(struct proc *p)
 {
   p->epoch = global_epoch;
   p->epoch_depth = 0;
-  initlock(&p->gc_epoch_lock, "per process gc_lock");
+  initlock(&p->gc_epoch_lock, "per process gc_lock", 0);
 }

 void
 initgc(void)
 {
-  initlock(&gc_lock.l, "gc");
+  initlock(&gc_lock.l, "gc", LOCKSTAT_GC);
   global_epoch = NEPOCH-2;
   for (int i = 0; i < ncpu; i++) {
......
@@ -136,12 +136,9 @@ kfree_pool(struct kmem *m, char *v)
   if (memsize(v) == -1ull)
     panic("kfree_pool: unknown region %p", v);
-  if (kinited && m->size <= 16384) {
-    verifyfree(v, m->size);
   // Fill with junk to catch dangling refs.
-  if (kalloc_memset)
+  if (kalloc_memset && kinited && m->size <= 16384)
     memset(v, 1, m->size);
-  }
   acquire(&m->lock);
   r = (struct run*)v;
@@ -242,7 +239,7 @@ initkalloc(u64 mbaddr)
   for (int c = 0; c < NCPU; c++) {
     kmems[c].name[0] = (char) c + '0';
     safestrcpy(kmems[c].name+1, "kmem", MAXNAME-1);
-    initlock(&kmems[c].lock, kmems[c].name);
+    initlock(&kmems[c].lock, kmems[c].name, LOCKSTAT_KALLOC);
     kmems[c].size = PGSIZE;
   }
@@ -250,7 +247,7 @@ initkalloc(u64 mbaddr)
     for (int c = 0; c < NCPU; c++) {
       slabmem[i][c].name[0] = (char) c + '0';
       initlock(&slabmem[i][c].lock,
-               slabmem[i][c].name);
+               slabmem[i][c].name, LOCKSTAT_KALLOC);
     }
   }
@@ -284,6 +281,7 @@ initkalloc(u64 mbaddr)
 void
 kfree(void *v)
 {
+  verifyfree(v, mykmem()->size);
   kfree_pool(mykmem(), v);
 }
@@ -297,14 +295,16 @@ void
 verifyfree(char *ptr, u64 nbytes)
 {
 #if VERIFYFREE
-  char *e = ptr + nbytes;
-  for (; ptr < e; ptr++) {
+  char *p = ptr;
+  char *e = p + nbytes;
+  for (; p < e; p++) {
     // Search for pointers in the ptr region
-    u64 x = *(uptr *)ptr;
+    u64 x = *(uptr *)p;
     if (KBASE < x && x < KBASE+(128ull<<30)) {
       struct klockstat *kls = (struct klockstat *) x;
       if (kls->magic == LOCKSTAT_MAGIC)
-        panic("verifyunmarked: LOCKSTAT_MAGIC %p:%lu", ptr, nbytes);
+        panic("verifyunmarked: LOCKSTAT_MAGIC %p(%lu):%p->%p",
+              ptr, nbytes, p, kls);
     }
   }
 #endif
......
@@ -268,7 +268,7 @@ void sampconf(void);
 void acquire(struct spinlock*);
 int tryacquire(struct spinlock*);
 int holding(struct spinlock*);
-void initlock(struct spinlock*, const char*);
+void initlock(struct spinlock*, const char*, int);
 void destroylock(struct spinlock *lk);
 void release(struct spinlock*);
......
@@ -32,7 +32,7 @@ kminit(void)
   for (int c = 0; c < NCPU; c++) {
     freelists[c].name[0] = (char) c + '0';
     safestrcpy(freelists[c].name+1, "freelist", MAXNAME-1);
-    initlock(&freelists[c].lock, freelists[c].name);
+    initlock(&freelists[c].lock, freelists[c].name, LOCKSTAT_KMALLOC);
   }
 }
@@ -102,7 +102,7 @@ kmfree(void *ap)
   b = (long) h->next;
   if(b < 0 || b > KMMAX)
     panic("kmfree bad bucket");
-  verifyfree(ap, 1 << b);
+  verifyfree(ap, (1 << b) - sizeof(struct header));
   h->next = freelists[c].buckets[b];
   freelists[c].buckets[b] = h;
......
@@ -27,3 +27,18 @@ struct klockstat {
 #define LOCKSTAT_START 1
 #define LOCKSTAT_STOP 2
 #define LOCKSTAT_CLEAR 3
+
+// Debug knobs
+#define LOCKSTAT_BIO 0
+#define LOCKSTAT_CONDVAR 0
+#define LOCKSTAT_CONSOLE 1
+#define LOCKSTAT_CRANGE 1
+#define LOCKSTAT_FS 1
+#define LOCKSTAT_GC 1
+#define LOCKSTAT_KALLOC 1
+#define LOCKSTAT_KMALLOC 1
+#define LOCKSTAT_NET 1
+#define LOCKSTAT_PIPE 1
+#define LOCKSTAT_PROC 1
+#define LOCKSTAT_SCHED 1
+#define LOCKSTAT_VM 1
@@ -99,7 +99,7 @@ start_timer(struct timer_thread *t, void (*func)(void),
   t->nsec = 1000000000 / 1000*msec;
   t->func = func;
   initcondvar(&t->waitcv, name);
-  initlock(&t->waitlk, name);
+  initlock(&t->waitlk, name, LOCKSTAT_NET);
   p = threadalloc(net_timer, t);
   if (p == NULL)
     panic("net: start_timer");
......
@@ -257,5 +257,5 @@ lwip_core_sleep(struct condvar *c)
 void
 lwip_core_init(void)
 {
-  initlock(&lwprot.lk, "lwIP lwprot");
+  initlock(&lwprot.lk, "lwIP lwprot", 1);
 }
@@ -36,7 +36,7 @@ pipealloc(struct file **f0, struct file **f1)
   p->writeopen = 1;
   p->nwrite = 0;
   p->nread = 0;
-  initlock(&p->lock, "pipe");
+  initlock(&p->lock, "pipe", LOCKSTAT_PIPE);
   initcondvar(&p->cv, "pipe");
   (*f0)->type = FD_PIPE;
   (*f0)->readable = 1;
@@ -50,8 +50,10 @@ pipealloc(struct file **f0, struct file **f1)
 //PAGEBREAK: 20
  bad:
-  if(p)
+  if(p) {
+    destroylock(&p->lock);
     kfree((char*)p);
+  }
   if(*f0)
     fileclose(*f0);
   if(*f1)
@@ -71,6 +73,7 @@ pipeclose(struct pipe *p, int writable)
   cv_wakeup(&p->cv);
   if(p->readopen == 0 && p->writeopen == 0){
     release(&p->lock);
+    destroylock(&p->lock);
     kfree((char*)p);
   } else
     release(&p->lock);
......
@@ -204,7 +204,7 @@ allocproc(void)
 #endif
   snprintf(p->lockname, sizeof(p->lockname), "cv:proc:%d", p->pid);
-  initlock(&p->lock, p->lockname+3);
+  initlock(&p->lock, p->lockname+3, LOCKSTAT_PROC);
   initcondvar(&p->cv, p->lockname);
   initwqframe(&p->wqframe);
......
@@ -126,7 +126,7 @@ initsched(void)
   int i;

   for (i = 0; i < NCPU; i++) {
-    initlock(&runq[i].lock, "runq");
+    initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
     STAILQ_INIT(&runq[i].q);
   }
 }
@@ -114,6 +114,7 @@ lockstat_init(struct spinlock *lk)
   lk->stat = kmalloc(sizeof(*lk->stat));
   if (lk->stat == NULL)
     return;
   memset(lk->stat, 0, sizeof(*lk->stat));
   lk->stat->magic = LOCKSTAT_MAGIC;
@@ -140,6 +141,8 @@ lockstat_clear(void)
   LIST_FOREACH_SAFE(stat, &lockstat_list, link, tmp) {
     if (stat->magic == 0) {
       LIST_REMOVE(stat, link);
+      // So verifyfree doesn't follow le_next
+      stat->link.le_next = NULL;
       gc_delayed(stat, kmfree);
     } else {
       memset(&stat->s.cpu, 0, sizeof(stat->s.cpu));
@@ -211,13 +214,15 @@ initlockstat(void)
 #endif

 void
-initlock(struct spinlock *lk, const char *name)
+initlock(struct spinlock *lk, const char *name, int lockstat)
 {
 #if SPINLOCK_DEBUG
   lk->name = name;
   lk->cpu = 0;
 #endif
 #if LOCKSTAT
+  lk->stat = NULL;
+  if (lockstat)
     lockstat_init(lk);
 #endif
   lk->locked = 0;
......
@@ -24,7 +24,7 @@ vma_alloc(void)
   memset(e, 0, sizeof(struct vma));
   e->va_type = PRIVATE;
   snprintf(e->lockname, sizeof(e->lockname), "vma:%p", e);
-  initlock(&e->lock, e->lockname);
+  initlock(&e->lock, e->lockname, LOCKSTAT_VM);
   return e;
 }
@@ -41,6 +41,7 @@ vma_free(void *p)
   struct vma *e = (struct vma *) p;
   if(e->n)
     vmn_decref(e->n);
+  destroylock(&e->lock);
   kmfree(e);
 }
@@ -107,11 +108,12 @@ vmap_alloc(void)
     return 0;
   memset(m, 0, sizeof(struct vmap));
   snprintf(m->lockname, sizeof(m->lockname), "vmap:%p", m);
-  initlock(&m->lock, m->lockname);
+  initlock(&m->lock, m->lockname, LOCKSTAT_VM);
   m->ref = 1;
   m->pml4 = setupkvm();
   if (m->pml4 == 0) {
     cprintf("vmap_alloc: setupkvm out of memory\n");
+    destroylock(&m->lock);
     kmfree(m);
     return 0;
   }
@@ -331,6 +333,7 @@ vmap_free(void *p)
   freevm(m->pml4);
   m->pml4 = 0;
   m->alloc = 0;
+  destroylock(&m->lock);
 }

 // Does any vma overlap start..start+len?
......