提交 c0436a69 创建 作者: Nickolai Zeldovich

Lock-free linked lists are susceptible to ABA races,

which actually show up on real hardware.
上级 020f4b22
......@@ -84,7 +84,6 @@ static void dostack(uptr a0, u64 a1)
goto bad;
if(args->vmap->insert(vmn, USERTOP-(USTACKPAGES*PGSIZE), 1) < 0)
goto bad;
vmn = 0;
// Push argument strings, prepare rest of stack in ustack.
sp = USERTOP;
......@@ -135,7 +134,6 @@ static void doheap(uptr a0, u64 a1)
goto bad;
if(args->vmap->insert(vmn, BRK, 1) < 0)
goto bad;
vmn = 0;
prof_end(doheap_prof);
return;
......@@ -149,7 +147,6 @@ exec(char *path, char **argv)
{
struct inode *ip = NULL;
struct vmap *vmp = NULL;
struct vmnode *vmn = NULL;
struct elfhdr elf;
struct proghdr ph;
u64 off;
......@@ -227,8 +224,6 @@ exec(char *path, char **argv)
cprintf("exec failed\n");
if(vmp)
vmp->decref();
if(vmn)
delete vmn;
gc_end_epoch();
return 0;
......
......@@ -58,6 +58,7 @@ gc_free_tofreelist(atomic<rcu_freed*> *head, u64 epoch)
for (; r; r = nr) {
if (r->_rcu_epoch > epoch) {
cprintf("gc_free_tofreelist: r->epoch %ld > epoch %ld\n", r->_rcu_epoch, epoch);
cprintf("gc_free_tofreelist: name %s\n", r->_rcu_type);
assert(0);
}
nr = r->_rcu_next;
......
......@@ -170,9 +170,13 @@ kalloc_pool(struct kmem *km)
m = &km[cn];
r = m->freelist;
while (r && !cmpxch_update(&m->freelist, &r, r->next))
run *nxt = r->next;
while (r && !cmpxch_update(&m->freelist, &r, nxt))
; /* spin */
if (r && r->next != nxt)
panic("kalloc_pool: aba race %p %p %p\n", r, r->next, nxt);
if (r) {
m->nfree--;
break;
......
......@@ -88,11 +88,18 @@ kmalloc(u64 nbytes)
return 0;
}
} else {
if (cmpxch(&freelists[c].buckets[b], h, h->next))
header *nxt = h->next;
if (cmpxch(&freelists[c].buckets[b], h, nxt)) {
if (h->next != nxt)
panic("kmalloc: aba race");
break;
}
}
}
if (ALLOC_MEMSET)
memset(h, 4, (1<<b));
mtlabel(mtrace_label_heap, (void*) h, nbytes, "kmalloc'ed", sizeof("kmalloc'ed"));
return h;
}
......
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论