Commit 9c16a4ba by Nickolai Zeldovich

avoid the compare_exchange_strong() member function

Parent: c2391d31
...@@ -13,3 +13,31 @@ ...@@ -13,3 +13,31 @@
#define _GLIBCXX_ATOMIC_BUILTINS_8 1 #define _GLIBCXX_ATOMIC_BUILTINS_8 1
#include "atomic_std.h" #include "atomic_std.h"
// Single-shot compare-and-swap with *strong* semantics: atomically
// replace *a with `desired` iff it currently holds `expected`.
// Returns true on success, false if the observed value differed.
//
// Callers in this tree use it where compare_exchange_strong() used to
// be called directly, including inside assert() (e.g. the global_epoch
// and tofree-head updates in gc.cc), so a spurious failure is NOT
// acceptable.  To keep avoiding the compare_exchange_strong() member
// function, we emulate it: compare_exchange_weak() may fail spuriously,
// but on failure it stores the value it observed into its first
// argument — if that value still equals `expected`, the failure was
// spurious and we simply retry.
template<class T>
bool
cmpxch(std::atomic<T> *a, T expected, T desired)
{
  T observed = expected;
  while (!a->compare_exchange_weak(observed, desired)) {
    if (!(observed == expected))
      return false;          // genuine mismatch: someone else won
    observed = expected;     // spurious failure: retry with same expectation
  }
  return true;
}
// Volatile-atomic overload of cmpxch(); same strong single-shot CAS
// contract as the non-volatile version: returns true iff *a held
// `expected` and was replaced by `desired`.
//
// compare_exchange_weak() may fail spuriously; since callers (e.g.
// assert(cmpxch(...)) sites) require strong semantics, retry whenever
// the failure left the observed value equal to `expected` — that can
// only mean the failure was spurious.
template<class T>
bool
cmpxch(volatile std::atomic<T> *a, T expected, T desired)
{
  T observed = expected;
  while (!a->compare_exchange_weak(observed, desired)) {
    if (!(observed == expected))
      return false;          // genuine mismatch
    observed = expected;     // spurious failure: retry
  }
  return true;
}
// One CAS attempt that reports back what it saw: atomically replace
// *a with `desired` iff it currently holds *expected.  On failure —
// including a spurious compare_exchange_weak() failure — the value
// actually observed in *a is written through `expected`, which is why
// every caller wraps this in a retry loop (see the delayed-list head
// updates in gc.cc).
template<class T>
bool
cmpxch_update(std::atomic<T> *a, T *expected, T desired)
{
  bool swapped = a->compare_exchange_weak(*expected, desired);
  return swapped;
}
// Volatile-atomic overload of cmpxch_update(); identical contract:
// a single weak CAS attempt that, on failure (genuine or spurious),
// stores the observed value of *a through `expected` so the caller's
// retry loop starts from fresh state.
template<class T>
bool
cmpxch_update(volatile std::atomic<T> *a, T *expected, T desired)
{
  bool swapped = a->compare_exchange_weak(*expected, desired);
  return swapped;
}
...@@ -392,7 +392,7 @@ void crange::add_index(int l, range *e, range *p1, range *s1) ...@@ -392,7 +392,7 @@ void crange::add_index(int l, range *e, range *p1, range *s1)
if (l >= e->nlevel-1) return; if (l >= e->nlevel-1) return;
if (MARKED(e->next[l+1])) return; if (MARKED(e->next[l+1])) return;
// crange_check(cr, NULL); // crange_check(cr, NULL);
if (e->curlevel.compare_exchange_strong(l, l+1)) { if (cmpxch(&e->curlevel, l, l+1)) {
assert(e->curlevel < e->nlevel); assert(e->curlevel < e->nlevel);
// this is the core inserting at level l+1, but some core may be deleting // this is the core inserting at level l+1, but some core may be deleting
struct range *s = WOMARK(s1); struct range *s = WOMARK(s1);
......
...@@ -299,7 +299,7 @@ iget(u32 dev, u32 inum) ...@@ -299,7 +299,7 @@ iget(u32 dev, u32 inum)
ins->remove(mkpair(victim->dev, victim->inum), &victim); ins->remove(mkpair(victim->dev, victim->inum), &victim);
gc_delayed(victim); gc_delayed(victim);
} else { } else {
if (!icache_free[mycpu()->id].x.compare_exchange_strong(cur_free, cur_free-1)) if (!cmpxch(&icache_free[mycpu()->id].x, cur_free, cur_free-1))
goto retry_evict; goto retry_evict;
} }
...@@ -644,8 +644,7 @@ dir_init(struct inode *dp) ...@@ -644,8 +644,7 @@ dir_init(struct inode *dp)
brelse(bp, 0); brelse(bp, 0);
} }
decltype(dir) expect_null = 0; if (!cmpxch(&dp->dir, (decltype(dir)) 0, dir)) {
if (!dp->dir.compare_exchange_strong(expect_null, dir)) {
// XXX free all the dirents // XXX free all the dirents
delete dir; delete dir;
} }
......
...@@ -82,7 +82,7 @@ gc_move_to_tofree_cpu(int c, u64 epoch) ...@@ -82,7 +82,7 @@ gc_move_to_tofree_cpu(int c, u64 epoch)
assert(gc_state[c].delayed[fe].epoch == epoch-(NEPOCH-2)); // XXX race with setting epoch = 0 assert(gc_state[c].delayed[fe].epoch == epoch-(NEPOCH-2)); // XXX race with setting epoch = 0
// unhook list for fe epoch atomically; this shouldn't fail // unhook list for fe epoch atomically; this shouldn't fail
head = gc_state[c].delayed[fe].head; head = gc_state[c].delayed[fe].head;
while (!gc_state[c].delayed[fe].head.compare_exchange_strong(head, (rcu_freed*)0)) {} while (!cmpxch_update(&gc_state[c].delayed[fe].head, &head, (rcu_freed*) 0)) {}
// insert list into tofree list so that each core can free in parallel and free its elements // insert list into tofree list so that each core can free in parallel and free its elements
if(gc_state[c].tofree[fe].epoch != gc_state[c].delayed[fe].epoch) { if(gc_state[c].tofree[fe].epoch != gc_state[c].delayed[fe].epoch) {
...@@ -91,7 +91,7 @@ gc_move_to_tofree_cpu(int c, u64 epoch) ...@@ -91,7 +91,7 @@ gc_move_to_tofree_cpu(int c, u64 epoch)
gc_state[c].delayed[fe].epoch.load()); gc_state[c].delayed[fe].epoch.load());
assert(0); assert(0);
} }
assert(gc_state[c].tofree[fe].head.exchange(head) == 0); assert(cmpxch(&gc_state[c].tofree[fe].head, (rcu_freed*) 0, head));
// move delayed NEPOCH's adhead // move delayed NEPOCH's adhead
gc_state[c].delayed[fe].epoch += NEPOCH; gc_state[c].delayed[fe].epoch += NEPOCH;
...@@ -109,7 +109,7 @@ gc_move_to_tofree(u64 epoch) ...@@ -109,7 +109,7 @@ gc_move_to_tofree(u64 epoch)
for (int c = 0; c < ncpu; c++) { for (int c = 0; c < ncpu; c++) {
gc_move_to_tofree_cpu(c, epoch); gc_move_to_tofree_cpu(c, epoch);
} }
assert(global_epoch.compare_exchange_strong(epoch, epoch+1)); assert(cmpxch(&global_epoch, epoch, epoch+1));
} }
// If all threads have seen global_epoch, we can move elements in global_epoch-2 to tofreelist // If all threads have seen global_epoch, we can move elements in global_epoch-2 to tofreelist
...@@ -169,7 +169,7 @@ gc_delayed(rcu_freed *e) ...@@ -169,7 +169,7 @@ gc_delayed(rcu_freed *e)
} }
e->_rcu_epoch = myepoch; e->_rcu_epoch = myepoch;
e->_rcu_next = gc_state[c].delayed[myepoch % NEPOCH].head; e->_rcu_next = gc_state[c].delayed[myepoch % NEPOCH].head;
while (!gc_state[c].delayed[myepoch % NEPOCH].head.compare_exchange_strong(e->_rcu_next, e)) {} while (!cmpxch_update(&gc_state[c].delayed[myepoch % NEPOCH].head, &e->_rcu_next, e)) {}
popcli(); popcli();
} }
......
...@@ -90,8 +90,7 @@ class xns : public rcu_freed { ...@@ -90,8 +90,7 @@ class xns : public rcu_freed {
} }
e->next = root.load(); e->next = root.load();
auto expect = e->next.load(); if (cmpxch(&table[i].chain, e->next.load(), e))
if (table[i].chain.compare_exchange_strong(expect, e))
return 0; return 0;
} }
} }
...@@ -121,21 +120,19 @@ class xns : public rcu_freed { ...@@ -121,21 +120,19 @@ class xns : public rcu_freed {
auto pe = &table[i].chain; auto pe = &table[i].chain;
for (;;) { for (;;) {
auto e = *pe; auto e = pe->load();
if (!e) if (!e)
return false; return false;
if (e->key == key && (!vp || e->val == *vp)) { if (e->key == key && (!vp || e->val == *vp)) {
int zero = 0; if (!cmpxch(&e->next_lock, 0, 1))
if (!e->next_lock.compare_exchange_strong(zero, 1))
break; break;
if (!pelock->compare_exchange_strong(zero, 1)) { if (!cmpxch(pelock, 0, 1)) {
e->next_lock = 0; e->next_lock = 0;
break; break;
} }
auto expect = e.load(); /* XXX c_e_s replaces first arg! */ if (!cmpxch(pe, e, e->next.load())) {
if (!pe->compare_exchange_strong(expect, e->next)) {
*pelock = 0; *pelock = 0;
e->next_lock = 0; e->next_lock = 0;
break; break;
......
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment