Commit 71ecfe98 authored by Nickolai Zeldovich

Merge branch 'scale-amd64' of git+ssh://pdos.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

@@ -9,3 +9,4 @@ o.*
 /mscan.syms
 /mscan.kern
 config.mk
+lwip
@@ -137,7 +137,7 @@ kfree_pool(struct kmem *m, char *v)
     panic("kfree_pool: unknown region %p", v);
   // Fill with junk to catch dangling refs.
-  if (kalloc_memset && kinited && m->size <= 16384)
+  if (ALLOC_MEMSET && kinited && m->size <= 16384)
     memset(v, 1, m->size);
   acquire(&m->lock);
@@ -189,7 +189,7 @@ kalloc_pool(struct kmem *km)
   mtlabel(mtrace_label_block, r, m->size, "kalloc", sizeof("kalloc"));
-  if (kalloc_memset && m->size <= 16384)
+  if (ALLOC_MEMSET && m->size <= 16384)
     memset(r, 2, m->size);
   return (char*)r;
 }
@@ -303,7 +303,7 @@ verifyfree(char *ptr, u64 nbytes)
     if (KBASE < x && x < KBASE+(128ull<<30)) {
       struct klockstat *kls = (struct klockstat *) x;
       if (kls->magic == LOCKSTAT_MAGIC)
-        panic("verifyunmarked: LOCKSTAT_MAGIC %p(%lu):%p->%p",
+        panic("LOCKSTAT_MAGIC %p(%lu):%p->%p",
              ptr, nbytes, p, kls);
     }
   }
......
@@ -12,5 +12,3 @@ struct kmem {
 } __mpalign__;
 extern struct kmem kmems[NCPU];
-enum { kalloc_memset = 1 };
@@ -102,10 +102,13 @@ kmfree(void *ap)
   b = (long) h->next;
   if(b < 0 || b > KMMAX)
     panic("kmfree bad bucket");
-  verifyfree(ap, (1 << b) - sizeof(struct header));
+  verifyfree(ap, (1<<b) - sizeof(struct header));
+  if (ALLOC_MEMSET)
+    memset(ap, 3, (1<<b) - sizeof(struct header));
   h->next = freelists[c].buckets[b];
   freelists[c].buckets[b] = h;
   mtunlabel(mtrace_label_heap, ap);
   release(&freelists[c].lock);
 }
......
@@ -29,7 +29,7 @@ struct klockstat {
 #define LOCKSTAT_CLEAR 3
 // Debug knobs
-#define LOCKSTAT_BIO 0
+#define LOCKSTAT_BIO 1
 #define LOCKSTAT_CONDVAR 0
 #define LOCKSTAT_CONSOLE 1
 #define LOCKSTAT_CRANGE 1
......
@@ -19,6 +19,7 @@
 #define SPINLOCK_DEBUG 1 // Debug spin locks
 #define LOCKSTAT 0
 #define VERIFYFREE LOCKSTAT
+#define ALLOC_MEMSET 1
 #if defined(HW_josmp)
 #define NCPU 16 // maximum number of CPUs
 #define MTRACE 0
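ALLOC_MEMSET is the single knob that the kalloc.c and kmalloc.c hunks above now test before filling freed or freshly allocated blocks with junk bytes (1, 2 and 3 respectively), replacing the per-file kalloc_memset enum. The junk fill turns a dangling reference into an obviously bogus pattern instead of stale but plausible data. A minimal user-space sketch of the same idea (the POISON value and poison_free helper are illustrative, not kernel code; the block is only poisoned, not released, so the demo itself stays well defined):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { POISON = 0x01 };          // junk byte, like the kernel's memset(v, 1, m->size)

// Illustrative "free": poison the block so any later use is easy to spot.
// (The kernel poisons and then links the block on a freelist.)
static void
poison_free(unsigned char *p, size_t sz)
{
  memset(p, POISON, sz);
}

int
main(void)
{
  size_t sz = 64;
  unsigned char *p = malloc(sz);
  if (p == NULL)
    return 1;
  memset(p, 0xab, sz);           // pretend this is live object data
  poison_free(p, sz);            // the object is logically dead from here on

  // A lingering pointer now sees the poison pattern, which a sanity check
  // (or just a glance in a debugger) catches immediately:
  assert(p[0] == POISON && p[sz - 1] == POISON);
  printf("stale read sees %#x, not old data\n", p[0]);

  free(p);
  return 0;
}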
......
#include "types.h"
#include "kernel.h"
#include "mmu.h"
#include "amd64.h"
#include "spinlock.h"
#include "condvar.h"
#include "queue.h"
#include "proc.h"
#include "cpu.h"
#include "kmtrace.h"
struct rcu {
  unsigned long epoch;
  TAILQ_ENTRY(rcu) link;
  union {
    struct {
      void (*dofree)(void *);
      void *item;
    } f1;
    struct {
      void (*dofree)(int, u64);
      int arg1;
      u64 arg2;
    } f2;
  };
  int type;
};

TAILQ_HEAD(rcu_head, rcu);

static struct { struct rcu_head x __mpalign__; } rcu_q[NCPU];
static u64 global_epoch __mpalign__;
static struct { struct spinlock l __mpalign__; } rcu_lock[NCPU];
static struct { int v __mpalign__; } delayed_nfree[NCPU];
static struct { struct condvar cv __mpalign__; } rcu_cv[NCPU];

enum { rcu_debug = 0 };
struct rcu *
rcu_alloc()
{
  return kmalloc(sizeof(struct rcu));
}

void *
rcu_min(void *vkey, void *v, void *arg)
{
  u64 *min_epoch_p = arg;
  struct proc *p = (struct proc *) v;
  if (*min_epoch_p > p->epoch) {
    *min_epoch_p = p->epoch;
  }
  return 0;
}

// XXX use atomic instruction to update list (instead of holding lock)
// lists of lists?
void
rcu_gc_work(void)
{
  struct rcu *r, *nr;
  u64 min_epoch = global_epoch;
  int n = 0;

  ns_enumerate(nspid, rcu_min, &min_epoch);

  // pushcli(); // not necessary: rcup->cpu_pin==1
  acquire(&rcu_lock[mycpu()->id].l);
  for (r = TAILQ_FIRST(&rcu_q[mycpu()->id].x); r != NULL; r = nr) {
    if (r->epoch >= min_epoch)
      break;
    release(&rcu_lock[mycpu()->id].l);
    // cprintf("free: %d (%x %x)\n", r->epoch, r->dofree, r->item);
    switch (r->type) {
    case 1:
      r->f1.dofree(r->f1.item);
      break;
    case 2:
      r->f2.dofree(r->f2.arg1, r->f2.arg2);
      break;
    default:
      panic("rcu type");
    }
    acquire(&rcu_lock[mycpu()->id].l);
    delayed_nfree[mycpu()->id].v--;
    n++;
    nr = TAILQ_NEXT(r, link);
    TAILQ_REMOVE(&rcu_q[mycpu()->id].x, r, link);
    kmfree(r);
  }
  release(&rcu_lock[mycpu()->id].l);
  if (rcu_debug)
    cprintf("rcu_gc: cpu %d n %d delayed_nfree=%d min_epoch=%d\n",
            mycpu()->id, n, delayed_nfree[mycpu()->id].v, min_epoch);
  // popcli(); // not necessary: rcup->cpu_pin==1

  // global_epoch can be bumped anywhere; this seems as good a place as any
  __sync_fetch_and_add(&global_epoch, 1);
}
void
rcu_gc_worker(void)
{
  release(&myproc()->lock); // initially held by scheduler
  mtstart(rcu_gc_worker, myproc());

  struct spinlock wl;
  initlock(&wl, "rcu_gc_worker"); // dummy lock

  for (;;) {
    rcu_gc_work();

    acquire(&wl);
    cv_sleep(&rcu_cv[mycpu()->id].cv, &wl);
    release(&wl);
  }
}

void
rcu_gc(void)
{
  cv_wakeup(&rcu_cv[mycpu()->id].cv);
}

// XXX Use atomic instruction to update list (instead of holding lock)
static void
rcu_delayed_int(struct rcu *r)
{
  pushcli();
  acquire(&rcu_lock[mycpu()->id].l);
  // cprintf("rcu_delayed: %d\n", global_epoch);
  TAILQ_INSERT_TAIL(&rcu_q[mycpu()->id].x, r, link);
  delayed_nfree[mycpu()->id].v++;
  release(&rcu_lock[mycpu()->id].l);
  popcli();
}

void
rcu_delayed(void *e, void (*dofree)(void *))
{
  if (rcu_debug)
    cprintf("rcu_delayed: %x %x\n", dofree, e);
  struct rcu *r = rcu_alloc();
  if (r == 0)
    panic("rcu_delayed");
  r->f1.dofree = dofree;
  r->f1.item = e;
  r->epoch = global_epoch;
  r->type = 1;
  rcu_delayed_int(r);
}

void
rcu_delayed2(int a1, u64 a2, void (*dofree)(int,u64))
{
  struct rcu *r = rcu_alloc();
  if (r == 0)
    panic("rcu_delayed");
  r->f2.dofree = dofree;
  r->f2.arg1 = a1;
  r->f2.arg2 = a2;
  r->epoch = global_epoch;
  r->type = 2;
  rcu_delayed_int(r);
}

void
rcu_begin_read(void)
{
  if (myproc() && myproc()->rcu_read_depth++ == 0)
    myproc()->epoch = global_epoch;
  __sync_synchronize();
}

void
rcu_end_read(void)
{
  if (myproc() && myproc()->rcu_read_depth > 0 &&
      --myproc()->rcu_read_depth == 0)
    myproc()->epoch = INF;
}

void
rcu_begin_write(struct spinlock *l)
{
  if (l)
    acquire(l);
  __sync_synchronize();
  rcu_begin_read();
}

void
rcu_end_write(struct spinlock *l)
{
  rcu_end_read();
  if (l)
    release(l);
}

void
initrcu(void)
{
  for (int i = 0; i < NCPU; i++) {
    initlock(&rcu_lock[i].l, "rcu");
    TAILQ_INIT(&rcu_q[i].x);
    initcondvar(&rcu_cv[i].cv, "rcu_gc_cv");
  }
}
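Callers elsewhere in the kernel use this API as reader sections plus deferred frees. A minimal sketch under the same headers as rcu.c (struct myobj, obj_lookup, obj_remove and myobj_free are hypothetical; only the rcu_*, kmfree and spinlock calls come from this codebase):

// Hypothetical RCU-protected singly linked list element.
struct myobj {
  int key;
  struct myobj *next;
};

// Reader: walk the list inside an epoch-protected read section.
// Objects seen here cannot be freed until rcu_end_read() runs, because
// rcu_gc_work() only frees items older than every process's epoch.
int
obj_lookup(struct myobj *head, int key)
{
  struct myobj *o;
  int found = 0;

  rcu_begin_read();
  for (o = head; o != NULL; o = o->next) {
    if (o->key == key) {
      found = 1;
      break;
    }
  }
  rcu_end_read();
  return found;
}

static void
myobj_free(void *p)
{
  kmfree(p);   // assumes the object was allocated with kmalloc()
}

// Updater: unlink under the list lock, then hand the object to the
// delayed-free queue instead of freeing it immediately.
void
obj_remove(struct spinlock *listlock, struct myobj **prevp, struct myobj *o)
{
  rcu_begin_write(listlock);
  *prevp = o->next;
  rcu_delayed(o, myobj_free);
  rcu_end_write(listlock);
}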
@@ -155,15 +155,26 @@ static int
 lockstat_read(struct inode *ip, char *dst, u32 off, u32 n)
 {
   static const u64 sz = sizeof(struct lockstat);
+  static struct {
+    struct klockstat *stat;
+    u32 off;
+  } cache;
   struct klockstat *stat;
   u32 cur;
   if (off % sz || n < sz)
     return -1;
-  cur = 0;
   acquire(&lockstat_lock);
-  LIST_FOREACH(stat, &lockstat_list, link) {
+  if (cache.off == off && cache.stat != NULL) {
+    cur = cache.off;
+    stat = cache.stat;
+  } else {
+    cur = 0;
+    stat = LIST_FIRST(&lockstat_list);
+  }
+  for (; stat != NULL; stat = LIST_NEXT(stat, link)) {
     struct lockstat *ls = &stat->s;
     if (n < sizeof(*ls))
       break;
@@ -176,7 +187,15 @@ lockstat_read(struct inode *ip, char *dst, u32 off, u32 n)
   }
   release(&lockstat_lock);
-  return cur >= off ? cur - off : 0;
+
+  if (cur < off) {
+    cache.off = 0;
+    cache.stat = NULL;
+    return 0;
+  }
+  cache.off = cur;
+  cache.stat = stat;
+  return cur - off;
 }
 static int
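The added static cache makes repeated sequential reads of the lockstat records cheap: when the requested offset is exactly where the previous read stopped, the scan resumes at the cached node instead of walking the list from LIST_FIRST again, and the cache is reset once a read runs past the end. A stand-alone sketch of the same resume-cache idea (struct node, read_at and the 64-byte record are illustrative, not the kernel's types):

#include <stddef.h>
#include <string.h>

struct node {
  struct node *next;
  char record[64];             // fixed-size record, standing in for struct lockstat
};

// Resume cache: where the previous sequential read left off.
static struct node *cache_node;
static size_t cache_off;

// Copy out the record at byte offset `off`; returns bytes copied (0 at EOF).
// Sequential callers (off advancing by sizeof(record)) hit the cache and
// skip rescanning the list from the head on every call.
size_t
read_at(struct node *head, size_t off, char *dst)
{
  struct node *n;
  size_t cur;

  if (cache_node != NULL && cache_off == off) {
    n = cache_node;            // resume where the last read ended
    cur = cache_off;
  } else {
    n = head;                  // cache miss: scan from the beginning
    cur = 0;
  }
  for (; n != NULL; cur += sizeof(n->record), n = n->next) {
    if (cur == off) {
      memcpy(dst, n->record, sizeof(n->record));
      cache_node = n->next;    // next sequential read starts here
      cache_off = cur + sizeof(n->record);
      return sizeof(n->record);
    }
  }
  cache_node = NULL;           // ran past the end; invalidate the cache
  cache_off = 0;
  return 0;
}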
......