Commit ea416994 authored by Nickolai Zeldovich

const'ify various fields in crange, which helps reasoning

about what fields can/cannot be changed by another core while the object is allocated.
Parent ced82a19
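In other words, once these fields are const they can only be set in a constructor's member initializer list and never change afterwards, so any core may read them without synchronization for as long as the object is alive. A minimal sketch of the idea (illustrative names, not the crange types):

#include <atomic>

// Hypothetical node: the immutable identity is const; only the
// mutable linkage still needs synchronization.
struct node {
  const unsigned long key;     // safe to read lock-free: fixed once the
  const unsigned long size;    // constructor returns
  std::atomic<node*> next;     // mutable state still needs atomics/locks

  node(unsigned long k, unsigned long sz) : key(k), size(sz), next(nullptr) {}
};

// A reader may inspect key and size without taking any lock;
// only next requires an atomic load.
bool covers(const node *n, unsigned long addr) {
  return addr >= n->key && addr < n->key + n->size;
}

Only the mutable linkage needs the per-object lock or atomics; the identity of the node is frozen at construction.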
@@ -86,12 +86,12 @@ class markptr_mark : public markptr<T> {
 struct range : public rcu_freed {
  private:
-  u64 key;
-  u64 size;
+  const u64 key;
+  const u64 size;
   atomic<int> curlevel;        // the current levels it appears on
-  int nlevel;                  // the number of levels this range should appear
-  crange *cr;                  // the crange this range is part of
-  markptr<range>* next;        // one next pointer per level
+  const int nlevel;            // the number of levels this range should appear
+  crange *const cr;            // the crange this range is part of
+  markptr<range>* const next;  // one next pointer per level
   spinlock *lock;              // on separate cache line?
   void print(int l);
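Since const members cannot be assigned in a constructor body, every constructor now has to supply their values in its member initializer list, which is exactly what the crange.cc hunks further down do. A tiny illustration with a hypothetical struct:

struct item {
  const int id;
  int uses;

  // ok: the const member gets its value in the initializer list
  item(int i) : id(i), uses(0) {}

  // would not compile: assignment to a const member in the body
  // item(int i) { id = i; uses = 0; }
};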
@@ -131,7 +131,8 @@ class crange_locked;
 struct crange {
  private:
-  range *crange_head;          // a crange skip list starts with a sentinel range (key 0, sz 0)
+  const int nlevel;            // number of levels in the crange skip list
+  range *const crange_head;    // a crange skip list starts with a sentinel range (key 0, sz 0)
   static void mark(range *f, range *s);
   static void freen(struct range *f, struct range *l);
@@ -147,7 +148,6 @@ struct crange {
   friend class range;
  public:
-  int nlevel;                  // number of levels in the crange skip list
   crange(int nlevel);
   ~crange(void);
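The skip-list height also moves from a mutable public field into a private const one; range can still read it because it is declared a friend, and other callers would go through a read-only accessor if one is provided. A hedged sketch of that pattern (hypothetical names, not the crange interface):

class skiplist {
 private:
  const int nlevel_;            // height fixed at construction
  friend class skiplist_node;   // friends may read it directly
 public:
  explicit skiplist(int nlevel) : nlevel_(nlevel) {}
  int levels() const { return nlevel_; }  // hypothetical read-only accessor
};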
@@ -172,10 +172,15 @@ end(const crange &cr)
 struct crange_locked {
  private:
-  crange *cr_;
-  u64 base_, size_;
+  crange *const cr_;
+  const u64 base_, size_;
-  range *prev_, *first_, *last_, *succ_;
+  range *const prev_;
+  range *first_;
+  range *last_;
+  range *const succ_;
+  bool moved_;
   scoped_gc_epoch gc;
   crange_locked(crange *cr, u64 base, u64 size, range *p, range *f, range *l, range *s);
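Note where const sits on the pointer members: crange *const cr_ is a constant pointer to a mutable crange, i.e. the handle cannot be re-pointed but the object it refers to can still be modified through it, which is different from const crange *. A quick illustrative sketch of the distinction:

struct widget { int value; };

void demo(widget &w, widget &other) {
  widget *const fixed_ptr = &w;   // the pointer itself is const
  fixed_ptr->value = 1;           // ok: the pointee stays mutable
  // fixed_ptr = &other;          // error: cannot re-seat the pointer

  const widget *ro_ptr = &w;      // the pointee is const
  ro_ptr = &other;                // ok: the pointer can be re-seated
  // ro_ptr->value = 2;           // error: cannot write through it
}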
......
@@ -115,17 +115,13 @@ range::dec_ref(void)
 }
 range::range(crange *crarg, u64 k, u64 sz, int nl)
-  : rcu_freed("range_delayed")
+  : rcu_freed("range_delayed"),
+    key(k), size(sz), curlevel(0),
+    nlevel(nl ?: range_draw_nlevel(cr->nlevel)),
+    cr(crarg), next(new markptr<range>[nlevel])
 {
   dprintf("range::range: %lu %lu %d\n", k, sz, nl);
-  key = k;
-  size = sz;
-  cr = crarg;
   assert(cr->nlevel > 0);
-  curlevel = 0;
-  if (nl == 0) nlevel = range_draw_nlevel(cr->nlevel);
-  else nlevel = nl;
-  next = new markptr<range>[nlevel];   // cache align?
   assert(next);
   for (int l = 0; l < nlevel; l++) next[l] = 0;
   assert(kmalign((void **) &lock, CACHELINE,
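Two details of the rewritten constructor are worth spelling out: nl ?: x is the GNU "Elvis" extension, shorthand for nl ? nl : x, and members are initialized in declaration order rather than in the order they appear in the initializer list, so an initializer should only read members that are declared before it. A small sketch of the ordering rule (hypothetical struct, not the range class):

struct box {
  const int a;
  const int b;   // declared after a, therefore initialized after a

  // Fine: b's initializer reads a, which is already initialized.
  box(int x) : a(x), b(a * 2) {}

  // Risky: if a's initializer read b, it would see an uninitialized
  // value, regardless of the textual order in the init list.
};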
@@ -203,10 +199,9 @@ crange::print(int full)
 }
 crange::crange(int nl)
+  : nlevel(nl), crange_head(new range(this, 0, 0, nlevel))
 {
   assert(nl > 0);
-  nlevel = nl;
-  crange_head = new range(this, 0, 0, nlevel);
   dprintf("crange::crange return 0x%lx\n", (u64) this);
 }
@@ -471,31 +466,27 @@ crange::search_lock(u64 k, u64 sz)
 }
 crange_locked::crange_locked(crange *cr, u64 base, u64 sz, range *p, range *f, range *l, range *s)
-  : cr_(cr), base_(base), size_(sz), prev_(p), first_(f), last_(l), succ_(s)
+  : cr_(cr), base_(base), size_(sz), prev_(p), first_(f), last_(l), succ_(s), moved_(false)
 {
 }
 crange_locked::crange_locked(crange_locked &&x)
-  : gc(std::move(x.gc))
+  : cr_(x.cr_),
+    base_(x.base_),
+    size_(x.size_),
+    prev_(x.prev_),
+    first_(x.first_),
+    last_(x.last_),
+    succ_(x.succ_),
+    moved_(false),
+    gc(std::move(x.gc))
 {
-  cr_ = x.cr_;
-  base_ = x.base_;
-  size_ = x.size_;
-  prev_ = x.prev_;
-  first_ = x.first_;
-  last_ = x.last_;
-  succ_ = x.succ_;
-  x.cr_ = 0;
-  x.prev_ = 0;
-  x.first_ = 0;
-  x.last_ = 0;
-  x.succ_ = 0;
+  x.moved_ = true;
 }
 crange_locked::~crange_locked()
 {
-  if (prev_) {
+  if (!moved_) {
     range *n;
     for (range *e = prev_; e && e != succ_; e = n) {
       // as soon as we release, the next pointer can change, so read it first
......
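Because cr_, prev_, and succ_ are now const, the move constructor can no longer null out the source's pointers to mark it dead; instead the new moved_ flag records that ownership has transferred, and the destructor only walks and unlocks the ranges when the object was not moved from. A minimal sketch of this moved-from-flag pattern, using a hypothetical mutex guard rather than the crange_locked code:

#include <mutex>

// Movable guard whose lock pointer is const: the source cannot be
// nulled out on move, so a flag records whether it still owns the lock.
class guard {
  std::mutex *const m_;
  bool moved_;
 public:
  explicit guard(std::mutex *m) : m_(m), moved_(false) { m_->lock(); }
  guard(guard &&x) : m_(x.m_), moved_(false) { x.moved_ = true; }
  guard(const guard &) = delete;
  guard &operator=(const guard &) = delete;
  ~guard() { if (!moved_) m_->unlock(); }
};

Both objects end up sharing the same const lock pointer, but only the one whose moved_ flag is still false performs the unlock in its destructor.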