Commit 1229ed4b authored by Nickolai Zeldovich

SPINLOCK_DEBUG

Parent 71c12c3a
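
The diff below compiles the spinlock debugging machinery only when SPINLOCK_DEBUG is set: the name/cpu/pcs fields of struct spinlock, the bookkeeping in initlock/acquire/release, holding() itself, and the holding() assertions in addrun, delrun, sched, and vmap_overlap. spinlock.h defines SPINLOCK_DEBUG to 1, so the checks stay on by default; setting it to 0 strips them out of the build, and ns.c then uses a larger NHASH. As a minimal sketch (not part of this commit; example_lock and do_work are made-up names, and the includes assume stock xv6 headers), a caller that wants to assert lock ownership is expected to guard the check the same way the hunks below do:

#include "types.h"    // uint (assumed, as in stock xv6)
#include "defs.h"     // acquire/release/holding/panic declarations (assumed)
#include "spinlock.h" // SPINLOCK_DEBUG and struct spinlock

struct spinlock example_lock;  // hypothetical; assume initlock(&example_lock, "example") ran at boot

void
do_work(void)
{
  acquire(&example_lock);
#if SPINLOCK_DEBUG
  // holding() is only compiled when SPINLOCK_DEBUG is set (see spinlock.c),
  // so ownership assertions must be guarded the same way.
  if(!holding(&example_lock))
    panic("do_work: lock not held");
#endif
  // ... critical section ...
  release(&example_lock);
}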
@@ -5,7 +5,11 @@
// name spaces
// XXX maybe use open hash table, no chain, better cache locality
#if SPINLOCK_DEBUG
#define NHASH 10
#else
#define NHASH 100
#endif
// XXX cache align
struct elem {
@@ -110,8 +110,10 @@ addrun1(struct runq *rq, struct proc *p)
void
addrun(struct proc *p)
{
#if SPINLOCK_DEBUG
  if(!holding(&p->lock))
    panic("addrun no p->lock");
#endif
  acquire(&runqs[p->cpuid].lock);
  // cprintf("%d: addrun %d\n", cpu->id, p->pid);
  addrun1(&runqs[p->cpuid], p);
@@ -134,8 +136,10 @@ delrun1(struct runq *rq, struct proc *p)
void
delrun(struct proc *p)
{
#if SPINLOCK_DEBUG
  if(!holding(&p->lock))
    panic("delrun no p->lock");
#endif
  acquire(&runq->lock);
  // cprintf("%d: delrun %d\n", cpu->id, p->pid);
  delrun1(runq, p);
@@ -566,8 +570,10 @@ sched(void)
{
  int intena;
#if SPINLOCK_DEBUG
  if(!holding(&proc->lock))
    panic("sched proc->lock");
#endif
  if(cpu->ncli != 1)
    panic("sched locks");
  if(proc->state == RUNNING)
@@ -112,18 +112,20 @@ rcu_end_read(void)
void
rcu_begin_write(struct spinlock *l)
{
  rcu_begin_read();
  if (l) acquire(l);
  __sync_synchronize();
  rcu_begin_read();
}

void
rcu_end_write(struct spinlock *l)
{
  rcu_end_read();
  // global_epoch can be bumped anywhere; this seems as good a place as any
  __sync_fetch_and_add(&global_epoch, 1);
  if (l) release(l);
  rcu_end_read();
}
@@ -15,9 +15,11 @@
void
initlock(struct spinlock *lk, char *name)
{
#if SPINLOCK_DEBUG
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
#endif
  lk->locked = 0;
}

// Acquire the lock.
@@ -28,12 +30,19 @@ void
acquire(struct spinlock *lk)
{
  pushcli(); // disable interrupts to avoid deadlock.
#if SPINLOCK_DEBUG
  if(holding(lk))
    panic("acquire");
#endif
  mtrace_lock_register(RET_EIP(),
                       lk,
#if SPINLOCK_DEBUG
                       lk->name ?: "null",
#else
                       "unknown",
#endif
                       mtrace_lockop_acquire,
                       0);
@@ -45,32 +54,46 @@ acquire(struct spinlock *lk)
  mtrace_lock_register(RET_EIP(),
                       lk,
#if SPINLOCK_DEBUG
                       lk->name ?: "null",
#else
                       "unknown",
#endif
                       mtrace_lockop_acquired,
                       0);
#if SPINLOCK_DEBUG
  // Record info about lock acquisition for debugging.
  lk->cpu = cpu;
  getcallerpcs(&lk, lk->pcs);
#endif
}

// Release the lock.
void
release(struct spinlock *lk)
{
#if SPINLOCK_DEBUG
  if(!holding(lk)) {
    cprintf("lock: %s\n", lk->name);
    panic("release");
  }
#endif
  mtrace_lock_register(RET_EIP(),
                       lk,
#if SPINLOCK_DEBUG
                       lk->name ?: "null",
#else
                       "unknown",
#endif
                       mtrace_lockop_release,
                       0);
#if SPINLOCK_DEBUG
  lk->pcs[0] = 0;
  lk->cpu = 0;
#endif
  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
@@ -105,11 +128,13 @@ getcallerpcs(void *v, uint pcs[])
}

// Check whether this cpu is holding the lock.
#if SPINLOCK_DEBUG
int
holding(struct spinlock *lock)
{
  return lock->locked && lock->cpu == cpu;
}
#endif

// Pushcli/popcli are like cli/sti except that they are matched:
#pragma once

#define SPINLOCK_DEBUG 1

// Mutual exclusion lock.
struct spinlock {
  uint locked;       // Is the lock held?
#if SPINLOCK_DEBUG
  // For debugging:
  char *name;        // Name of lock.
  struct cpu *cpu;   // The cpu holding the lock.
  uint pcs[10];      // The call stack (an array of program counters)
                     // that locked the lock.
#endif
};
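
Since this header defaults SPINLOCK_DEBUG to 1, the debug fields above stay compiled in unless the define is flipped to 0, at which point struct spinlock collapses to just the locked word. A hedged sketch of a compile-time check for that property (not in the commit; it assumes a C11 toolchain for _Static_assert and xv6's types.h for uint):

#include "types.h"
#include "spinlock.h"

#if !SPINLOCK_DEBUG
// With name/cpu/pcs compiled out, only the locked word is left, so data
// structures that embed a lock per entry stay compact.
_Static_assert(sizeof(struct spinlock) == sizeof(uint),
               "non-debug spinlock should be a single word");
#endif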
@@ -486,8 +486,10 @@ vmap_decref(struct vmap *m)
int
vmap_overlap(struct vmap *m, uint start, uint len)
{
#if SPINLOCK_DEBUG
  if(holding(&m->lock) == 0)
    panic("vmap_overlap no lock");
#endif
  if(start + len < start)
    panic("vmap_overlap bad len");