Add an "enable lockstat" flag to initlock.

You must call destroylock before freeing the memory that holds a lockstat-enabled lock. I did this for most of the locks.
Parent 2c179184
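As a quick sketch of the intended pattern (modeled on the pipe changes in the diff below; the flag names are the per-subsystem knobs added in this change):

    initlock(&p->lock, "pipe", LOCKSTAT_PIPE); // nonzero third argument: collect lockstat for this lock
    ...
    release(&p->lock);
    destroylock(&p->lock);                     // must run before the memory holding the lock is freed
    kfree((char*)p);

Error paths that free a partially set up object (for example the ns_insert failures in bget and iget) need the same destroylock call before gc_delayed/kmfree.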
@@ -97,6 +97,7 @@ bget(u32 dev, u64 sector, int *writer)
victim->flags |= B_BUSY;
ns_remove(bufns, KII(victim->dev, victim->sector), victim);
release(&victim->lock);
+ destroylock(&victim->lock);
gc_delayed(victim, kmfree);
b = kmalloc(sizeof(*b));
@@ -105,10 +106,11 @@ bget(u32 dev, u64 sector, int *writer)
b->flags = B_BUSY;
*writer = 1;
snprintf(b->lockname, sizeof(b->lockname), "cv:buf:%d", b->sector);
- initlock(&b->lock, b->lockname+3);
+ initlock(&b->lock, b->lockname+3, LOCKSTAT_BIO);
initcondvar(&b->cv, b->lockname);
gc_begin_epoch();
if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0) {
+ destroylock(&b->lock);
gc_delayed(b, kmfree);
goto loop;
}
@@ -168,7 +170,7 @@ initbio(void)
b->dev = 0xdeadbeef;
b->sector = -i; /* dummy to pre-allocate NBUF spaces for evict */
b->flags = 0;
initlock(&b->lock, "bcache-lock");
initlock(&b->lock, "bcache-lock", LOCKSTAT_BIO);
initcondvar(&b->cv, "bcache-cv");
if (ns_insert(bufns, KII(b->dev, b->sector), b) < 0)
panic("binit ns_insert");
@@ -136,6 +136,6 @@ cv_wakeup(struct condvar *cv)
void
initcondvar(struct condvar *cv, const char *n)
{
- initlock(&cv->lock, n);
+ initlock(&cv->lock, n, LOCKSTAT_CONDVAR);
LIST_INIT(&cv->waiters);
}
@@ -320,7 +320,7 @@ consoleread(struct inode *ip, char *dst, u32 off, u32 n)
void
initconsole(void)
{
initlock(&cons.lock, "console");
initlock(&cons.lock, "console", LOCKSTAT_CONSOLE);
cons.locking = 1;
devsw[CONSOLE].write = consolewrite;
@@ -62,7 +62,7 @@ crange_alloc(int nlevel)
cr->crange_head.size = 0;
assert(kmalign((void **) &cr->crange_head.lock,
CACHELINE, sizeof(struct spinlock)) == 0);
initlock(cr->crange_head.lock, "head lock");
initlock(cr->crange_head.lock, "head lock", LOCKSTAT_CRANGE);
cr->crange_head.next = kmalloc(sizeof(cr->crange_head.next[0]) * nlevel);
for (int l = 0; l < nlevel; l++) cr->crange_head.next[l] = 0;
if (crange_debug) cprintf("crange_alloc: return 0x%lx\n", (u64) cr);
@@ -82,6 +82,7 @@ crange_free(struct crange *cr)
clist_range_free(e);
}
kmfree(cr->crange_head.next);
+ destroylock(cr->crange_head.lock);
kmalignfree(cr->crange_head.lock);
kmalignfree(cr);
}
@@ -176,6 +177,7 @@ clist_range_free(void *p)
for (int l = 0; l < e->nlevel; l++) {
e->next[l] = (struct clist_range *) 0xDEADBEEF;
}
+ destroylock(e->lock);
kmalignfree(e->lock);
kmfree(e->next);
kmalignfree(e);
@@ -210,7 +212,7 @@ crange_new(struct crange *cr, u64 k, u64 sz, void *v, struct clist_range *n)
for (int l = 1; l < r->nlevel; l++) r->next[l] = 0;
assert(kmalign((void **) &r->lock, CACHELINE,
sizeof(struct spinlock)) == 0);
initlock(r->lock, "crange");
initlock(r->lock, "crange", LOCKSTAT_CRANGE);
r->cr = cr;
return r;
}
@@ -275,7 +275,7 @@ e1000attach(struct pci_func *pcif)
pci_func_enable(pcif);
initlock(&e1000.lk, "e1000");
initlock(&e1000.lk, "e1000", 1);
e1000.membase = pcif->reg_base[0];
e1000.iobase = pcif->reg_base[2];
e1000.pcidevid = pcif->dev_id;
@@ -234,6 +234,7 @@ ifree(void *arg)
ip->dir = 0;
}
+ destroylock(&ip->lock);
kmfree(ip);
}
@@ -295,10 +296,11 @@ iget(u32 dev, u32 inum)
ip->flags = I_BUSYR | I_BUSYW;
ip->readbusy = 1;
snprintf(ip->lockname, sizeof(ip->lockname), "cv:ino:%d", ip->inum);
- initlock(&ip->lock, ip->lockname+3);
+ initlock(&ip->lock, ip->lockname+3, LOCKSTAT_FS);
initcondvar(&ip->cv, ip->lockname);
ip->dir = 0;
if (ns_insert(ins, KII(ip->dev, ip->inum), ip) < 0) {
+ destroylock(&ip->lock);
gc_delayed(ip, kmfree);
goto retry;
}
@@ -277,7 +277,7 @@ gc_worker(void *x)
if (VERBOSE)
cprintf("gc_worker: %d\n", mycpu()->id);
initlock(&wl, "rcu_gc_worker dummy"); // dummy lock
initlock(&wl, "rcu_gc_worker dummy", LOCKSTAT_GC); // dummy lock
for (;;) {
u64 i;
acquire(&wl);
@@ -305,13 +305,13 @@ initprocgc(struct proc *p)
{
p->epoch = global_epoch;
p->epoch_depth = 0;
- initlock(&p->gc_epoch_lock, "per process gc_lock");
+ initlock(&p->gc_epoch_lock, "per process gc_lock", 0);
}
void
initgc(void)
{
initlock(&gc_lock.l, "gc");
initlock(&gc_lock.l, "gc", LOCKSTAT_GC);
global_epoch = NEPOCH-2;
for (int i = 0; i < ncpu; i++) {
@@ -136,12 +136,9 @@ kfree_pool(struct kmem *m, char *v)
if (memsize(v) == -1ull)
panic("kfree_pool: unknown region %p", v);
- if (kinited && m->size <= 16384) {
-   verifyfree(v, m->size);
-   // Fill with junk to catch dangling refs.
-   if (kalloc_memset)
-     memset(v, 1, m->size);
- }
+ // Fill with junk to catch dangling refs.
+ if (kalloc_memset && kinited && m->size <= 16384)
+   memset(v, 1, m->size);
acquire(&m->lock);
r = (struct run*)v;
@@ -242,7 +239,7 @@ initkalloc(u64 mbaddr)
for (int c = 0; c < NCPU; c++) {
kmems[c].name[0] = (char) c + '0';
safestrcpy(kmems[c].name+1, "kmem", MAXNAME-1);
- initlock(&kmems[c].lock, kmems[c].name);
+ initlock(&kmems[c].lock, kmems[c].name, LOCKSTAT_KALLOC);
kmems[c].size = PGSIZE;
}
@@ -250,7 +247,7 @@ initkalloc(u64 mbaddr)
for (int c = 0; c < NCPU; c++) {
slabmem[i][c].name[0] = (char) c + '0';
initlock(&slabmem[i][c].lock,
- slabmem[i][c].name);
+ slabmem[i][c].name, LOCKSTAT_KALLOC);
}
}
@@ -284,6 +281,7 @@ initkalloc(u64 mbaddr)
void
kfree(void *v)
{
+ verifyfree(v, mykmem()->size);
kfree_pool(mykmem(), v);
}
@@ -297,14 +295,16 @@ void
verifyfree(char *ptr, u64 nbytes)
{
#if VERIFYFREE
- char *e = ptr + nbytes;
- for (; ptr < e; ptr++) {
+ char *p = ptr;
+ char *e = p + nbytes;
+ for (; p < e; p++) {
// Search for pointers in the ptr region
- u64 x = *(uptr *)ptr;
+ u64 x = *(uptr *)p;
if (KBASE < x && x < KBASE+(128ull<<30)) {
struct klockstat *kls = (struct klockstat *) x;
if (kls->magic == LOCKSTAT_MAGIC)
panic("verifyunmarked: LOCKSTAT_MAGIC %p:%lu", ptr, nbytes);
panic("verifyunmarked: LOCKSTAT_MAGIC %p(%lu):%p->%p",
ptr, nbytes, p, kls);
}
}
#endif
@@ -268,7 +268,7 @@ void sampconf(void);
void acquire(struct spinlock*);
int tryacquire(struct spinlock*);
int holding(struct spinlock*);
- void initlock(struct spinlock*, const char*);
+ void initlock(struct spinlock*, const char*, int);
void destroylock(struct spinlock *lk);
void release(struct spinlock*);
@@ -32,7 +32,7 @@ kminit(void)
for (int c = 0; c < NCPU; c++) {
freelists[c].name[0] = (char) c + '0';
safestrcpy(freelists[c].name+1, "freelist", MAXNAME-1);
- initlock(&freelists[c].lock, freelists[c].name);
+ initlock(&freelists[c].lock, freelists[c].name, LOCKSTAT_KMALLOC);
}
}
@@ -102,7 +102,7 @@ kmfree(void *ap)
b = (long) h->next;
if(b < 0 || b > KMMAX)
panic("kmfree bad bucket");
- verifyfree(ap, 1 << b);
+ verifyfree(ap, (1 << b) - sizeof(struct header));
h->next = freelists[c].buckets[b];
freelists[c].buckets[b] = h;
@@ -24,6 +24,21 @@ struct klockstat {
struct lockstat s;
};
#define LOCKSTAT_START 1
#define LOCKSTAT_STOP 2
#define LOCKSTAT_CLEAR 3
+ // Debug knobs
+ #define LOCKSTAT_BIO 0
+ #define LOCKSTAT_CONDVAR 0
+ #define LOCKSTAT_CONSOLE 1
+ #define LOCKSTAT_CRANGE 1
+ #define LOCKSTAT_FS 1
+ #define LOCKSTAT_GC 1
+ #define LOCKSTAT_KALLOC 1
+ #define LOCKSTAT_KMALLOC 1
+ #define LOCKSTAT_NET 1
+ #define LOCKSTAT_PIPE 1
+ #define LOCKSTAT_PROC 1
+ #define LOCKSTAT_SCHED 1
+ #define LOCKSTAT_VM 1
@@ -99,7 +99,7 @@ start_timer(struct timer_thread *t, void (*func)(void),
t->nsec = 1000000000 / 1000*msec;
t->func = func;
initcondvar(&t->waitcv, name);
- initlock(&t->waitlk, name);
+ initlock(&t->waitlk, name, LOCKSTAT_NET);
p = threadalloc(net_timer, t);
if (p == NULL)
panic("net: start_timer");
@@ -257,5 +257,5 @@ lwip_core_sleep(struct condvar *c)
void
lwip_core_init(void)
{
initlock(&lwprot.lk, "lwIP lwprot");
initlock(&lwprot.lk, "lwIP lwprot", 1);
}
@@ -36,7 +36,7 @@ pipealloc(struct file **f0, struct file **f1)
p->writeopen = 1;
p->nwrite = 0;
p->nread = 0;
initlock(&p->lock, "pipe");
initlock(&p->lock, "pipe", LOCKSTAT_PIPE);
initcondvar(&p->cv, "pipe");
(*f0)->type = FD_PIPE;
(*f0)->readable = 1;
@@ -50,8 +50,10 @@ pipealloc(struct file **f0, struct file **f1)
//PAGEBREAK: 20
bad:
- if(p)
+ if(p) {
+   destroylock(&p->lock);
kfree((char*)p);
+ }
if(*f0)
fileclose(*f0);
if(*f1)
@@ -71,6 +73,7 @@ pipeclose(struct pipe *p, int writable)
cv_wakeup(&p->cv);
if(p->readopen == 0 && p->writeopen == 0){
release(&p->lock);
+ destroylock(&p->lock);
kfree((char*)p);
} else
release(&p->lock);
@@ -204,7 +204,7 @@ allocproc(void)
#endif
snprintf(p->lockname, sizeof(p->lockname), "cv:proc:%d", p->pid);
- initlock(&p->lock, p->lockname+3);
+ initlock(&p->lock, p->lockname+3, LOCKSTAT_PROC);
initcondvar(&p->cv, p->lockname);
initwqframe(&p->wqframe);
@@ -126,7 +126,7 @@ initsched(void)
int i;
for (i = 0; i < NCPU; i++) {
initlock(&runq[i].lock, "runq");
initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
STAILQ_INIT(&runq[i].q);
}
}
......@@ -114,6 +114,7 @@ lockstat_init(struct spinlock *lk)
lk->stat = kmalloc(sizeof(*lk->stat));
if (lk->stat == NULL)
return;
+ memset(lk->stat, 0, sizeof(*lk->stat));
lk->stat->magic = LOCKSTAT_MAGIC;
@@ -140,6 +141,8 @@ lockstat_clear(void)
LIST_FOREACH_SAFE(stat, &lockstat_list, link, tmp) {
if (stat->magic == 0) {
LIST_REMOVE(stat, link);
+ // So verifyfree doesn't follow le_next
+ stat->link.le_next = NULL;
gc_delayed(stat, kmfree);
} else {
memset(&stat->s.cpu, 0, sizeof(stat->s.cpu));
@@ -211,14 +214,16 @@ initlockstat(void)
#endif
void
- initlock(struct spinlock *lk, const char *name)
+ initlock(struct spinlock *lk, const char *name, int lockstat)
{
#if SPINLOCK_DEBUG
lk->name = name;
lk->cpu = 0;
#endif
#if LOCKSTAT
- lockstat_init(lk);
+ lk->stat = NULL;
+ if (lockstat)
+   lockstat_init(lk);
#endif
lk->locked = 0;
}
@@ -24,7 +24,7 @@ vma_alloc(void)
memset(e, 0, sizeof(struct vma));
e->va_type = PRIVATE;
snprintf(e->lockname, sizeof(e->lockname), "vma:%p", e);
- initlock(&e->lock, e->lockname);
+ initlock(&e->lock, e->lockname, LOCKSTAT_VM);
return e;
}
@@ -41,6 +41,7 @@ vma_free(void *p)
struct vma *e = (struct vma *) p;
if(e->n)
vmn_decref(e->n);
+ destroylock(&e->lock);
kmfree(e);
}
@@ -107,11 +108,12 @@ vmap_alloc(void)
return 0;
memset(m, 0, sizeof(struct vmap));
snprintf(m->lockname, sizeof(m->lockname), "vmap:%p", m);
- initlock(&m->lock, m->lockname);
+ initlock(&m->lock, m->lockname, LOCKSTAT_VM);
m->ref = 1;
m->pml4 = setupkvm();
if (m->pml4 == 0) {
cprintf("vmap_alloc: setupkvm out of memory\n");
+ destroylock(&m->lock);
kmfree(m);
return 0;
}
@@ -331,6 +333,7 @@ vmap_free(void *p)
freevm(m->pml4);
m->pml4 = 0;
m->alloc = 0;
+ destroylock(&m->lock);
}
// Does any vma overlap start..start+len?