Commit 3e6c70a4 authored by Nickolai Zeldovich

initial radix tree and VM

Parent 7eea37c9
@@ -42,7 +42,7 @@ class markptr {
 template<class T>
 class markptr_ptr : private markptr<T> {
  public:
-  void operator=(T *p) {
+  void operator=(T* p) {
     uptr p0, p1;
     do {
       p0 = markptr<T>::_p.load();
@@ -50,6 +50,22 @@ class markptr_ptr : private markptr<T> {
     } while (!markptr<T>::_p.compare_exchange_weak(p0, p1));
   }
+  bool cmpxch_update(T** expected, T* desired) {
+    uptr p0, p1;
+    do {
+      p0 = markptr<T>::_p.load();
+      p1 = (p0 & 1) | (uptr) desired;
+      T* cur = (T*) (p0 & ~1);
+      if (cur != *expected) {
+        *expected = cur;
+        return false;
+      }
+    } while (!markptr<T>::_p.compare_exchange_weak(p0, p1));
+    return true;
+  }
+  T* load() const {
+    return (T*) (markptr<T>::_p.load() & ~1);
+  }
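For readers coming to this diff cold: markptr<T> packs a mark bit into the low bit of an aligned pointer, and the new cmpxch_update() is a compare-and-swap on the pointer bits only; it preserves whatever mark is currently set and, on mismatch, writes the observed pointer back through *expected, mirroring the std::atomic compare_exchange convention. A minimal self-contained sketch of the same idea, using simplified names of my own rather than the repo's exact types:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Stand-in for the repo's markptr<T>: the low bit of an aligned
    // pointer stores a mark; the remaining bits store the address.
    template<class T>
    struct marked_ptr {
      std::atomic<uintptr_t> _p{0};

      T* load() const { return (T*)(_p.load() & ~(uintptr_t)1); }

      // CAS on the pointer bits only: the current mark bit is preserved,
      // and on mismatch the observed pointer is reported via *expected.
      bool cmpxch_update(T** expected, T* desired) {
        uintptr_t p0, p1;
        do {
          p0 = _p.load();
          p1 = (p0 & 1) | (uintptr_t)desired;   // keep mark, swap pointer
          T* cur = (T*)(p0 & ~(uintptr_t)1);
          if (cur != *expected) {
            *expected = cur;
            return false;
          }
        } while (!_p.compare_exchange_weak(p0, p1));
        return true;
      }
    };

    int main() {
      static int a, b;                  // aligned, so bit 0 is free
      marked_ptr<int> mp;
      int* expected = nullptr;
      assert(mp.cmpxch_update(&expected, &a));    // null -> &a succeeds
      expected = &b;
      assert(!mp.cmpxch_update(&expected, &b));   // fails: current is &a
      assert(expected == &a && mp.load() == &a);  // *expected was updated
    }

As in the diff, compare_exchange_weak may fail spuriously; that is harmless inside the retry loop.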
@@ -2,9 +2,13 @@
 #include "atomic.hh"
 #include "crange_arch.hh"
 #include "crange.hh"
+#include "radix.hh"
 #include "cpputil.hh"
 #include "hwvm.hh"
 
+#define VM_CRANGE 0
+#define VM_RADIX 1
+
 using std::atomic;
 
 // A memory object (physical pages or inode).
@@ -34,7 +38,14 @@ struct vmnode {
 // a specific memory object.
 enum vmatype { PRIVATE, COW };
 
-struct vma : public range {
+struct vma
+#if VM_CRANGE
+  : public range
+#endif
+#if VM_RADIX
+  : public radix_elem
+#endif
+{
   const uptr vma_start;        // start of mapping
   const uptr vma_end;          // one past the last byte
   const enum vmatype va_type;
@@ -50,7 +61,14 @@ struct vma : public range {
 // An address space: a set of vmas plus h/w page table.
 // The elements of e[] are not ordered by address.
 struct vmap {
+#if VM_CRANGE
   struct crange cr;
+#endif
+#if VM_RADIX
+  struct radix rx;
+#endif
   atomic<u64> ref;
   pgmap *const pml4;           // Page table
   char *const kshared;
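The VM_CRANGE/VM_RADIX defines introduced in the vm.hh hunk above select, at compile time, which concurrent index backs a vmap: the existing crange range index, or the new radix tree keyed by page (note rx is constructed with PGSHIFT below). Both expose search/search_lock and an iterable, lock-holding span, so most call sites in this diff differ only in the replace signature and in the fact that radix iteration yields one slot per page, possibly null. A toy illustration of the radix side's semantics only; std::map stands in for the real structure, there is no locking, and the names and page size are this sketch's assumptions:

    #include <cassert>
    #include <cstdint>
    #include <map>

    // One slot per page, keyed by page-aligned address;
    // replace(start, len, v) sets every slot the interval covers
    // (v == nullptr clears them).
    constexpr uintptr_t kPageSize = 4096;   // assumed; real code uses PGSHIFT

    struct toy_radix {
      std::map<uintptr_t, void*> slots;

      void* search(uintptr_t va) const {
        auto it = slots.find(va & ~(kPageSize - 1));
        return it == slots.end() ? nullptr : it->second;
      }

      void replace(uintptr_t start, uintptr_t len, void* v) {
        for (uintptr_t a = start; a < start + len; a += kPageSize)
          slots[a] = v;
      }
    };

    int main() {
      toy_radix rx;
      int elem;
      rx.replace(0x10000, 3 * kPageSize, &elem);  // map three pages
      assert(rx.search(0x11fff) == &elem);        // any byte in a page hits
      rx.replace(0x11000, kPageSize, nullptr);    // punch a one-page hole
      assert(rx.search(0x11000) == nullptr);
    }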
@@ -30,6 +30,7 @@ OBJS = \
 	pipe.o \
 	proc.o \
 	gc.o \
+	radix.o \
 	rnd.o \
 	sampler.o \
 	sched.o \
@@ -68,6 +68,7 @@ bucket(u64 nbytes)
     11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
     12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
     12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+    12,
   };
 
   assert(nbytes <= PGSIZE);
@@ -298,9 +298,14 @@ growproc(int n)
   // sbrk() would start to use the next region (e.g. the stack).
   uptr newstart = PGROUNDUP(curbrk);
   s64 newn = PGROUNDUP(n + curbrk - newstart);
+#if VM_CRANGE
   range *prev = 0;
   auto span = m->cr.search_lock(newstart, newn + PGSIZE);
-  for (range *r: span) {
+#endif
+#if VM_RADIX
+  auto span = m->rx.search_lock(newstart, newn + PGSIZE);
+#endif
+  for (auto r: span) {
     vma *e = (vma*) r;
     if (e->vma_start <= newstart) {
@@ -311,7 +316,9 @@ growproc(int n)
       newn -= e->vma_end - newstart;
       newstart = e->vma_end;
+#if VM_CRANGE
       prev = e;
+#endif
     } else {
       cprintf("growproc: overlap with existing mapping; brk %lx n %d\n",
               curbrk, n);
@@ -332,7 +339,13 @@ growproc(int n)
     return -1;
   }
 
+#if VM_CRANGE
   span.replace(prev, repl);
+#endif
+#if VM_RADIX
+  span.replace(newstart, newn, repl);
+#endif
 
   myproc()->brk += n;
   return 0;
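Aside from the backend split (crange's span.replace(prev, repl) versus radix's explicit span.replace(newstart, newn, repl)), the growproc() hunks are easiest to follow with concrete numbers for the page-rounding prologue. A worked example; the values and the PGROUNDUP definition are assumptions in the usual xv6 style, not taken from this diff:

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kPageSize = 4096;
    constexpr uint64_t pgroundup(uint64_t a) {
      return (a + kPageSize - 1) & ~(kPageSize - 1);  // xv6-style PGROUNDUP
    }

    int main() {
      // Suppose the break is mid-page and sbrk asks for two more pages.
      uint64_t curbrk = 0x5123, n = 0x2000;
      uint64_t newstart = pgroundup(curbrk);                 // 0x6000
      uint64_t newn     = pgroundup(n + curbrk - newstart);  // pgroundup(0x1123)
      assert(newstart == 0x6000);
      assert(newn == 0x2000);
      // The hunk above then locks [newstart, newstart + newn + PGSIZE),
      // one page more than strictly requested, before scanning for
      // overlapping mappings.
    }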
@@ -118,8 +118,10 @@ vmnode::demand_load()
 /*
  * vma
  */
-vma::vma(vmap *vmap, uptr start, uptr end, enum vmatype vtype, vmnode *vmn)
-  : range(&vmap->cr, start, end-start),
+vma::vma(vmap *vmap, uptr start, uptr end, enum vmatype vtype, vmnode *vmn) :
+#if VM_CRANGE
+  range(&vmap->cr, start, end-start),
+#endif
   vma_start(start), vma_end(end), va_type(vtype), n(vmn)
 {
   if (n)
@@ -136,8 +138,14 @@ vma::~vma()
 /*
  * vmap
  */
-vmap::vmap()
-  : cr(10), ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared))
+vmap::vmap() :
+#if VM_CRANGE
+  cr(10),
+#endif
+#if VM_RADIX
+  rx(PGSHIFT),
+#endif
+  ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared))
 {
   if (pml4 == 0) {
     cprintf("vmap_alloc: setupkvm out of memory\n");
@@ -181,12 +189,24 @@ vmap::decref()
 bool
 vmap::replace_vma(vma *a, vma *b)
 {
+#if VM_CRANGE
   auto span = cr.search_lock(a->vma_start, a->vma_end - a->vma_start);
+#endif
+#if VM_RADIX
+  auto span = rx.search_lock(a->vma_start, a->vma_end - a->vma_start);
+#endif
   if (a->deleted())
     return false;
   for (auto e: span)
     assert(a == e);
+#if VM_CRANGE
   span.replace(b);
+#endif
+#if VM_RADIX
+  span.replace(a->vma_start, b->vma_start-a->vma_start, 0);
+  span.replace(b->vma_start, b->vma_end-b->vma_start, b);
+  span.replace(b->vma_end, a->vma_end-b->vma_end, 0);
+#endif
   return true;
 }
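The crange side swaps vma a for vma b with a single span.replace(b) because the locked span is replaced wholesale. The radix side is page-slot based, so the three calls above first clear a's slots that precede b, then install b over its own range, then clear the slots that follow; when b covers a exactly, the first and third calls have length zero. Checking the slice arithmetic with invented addresses:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical vmas: a = [0x10000, 0x18000), b = [0x12000, 0x15000).
      uint64_t a_start = 0x10000, a_end = 0x18000;
      uint64_t b_start = 0x12000, b_end = 0x15000;

      // span.replace(a->vma_start, b->vma_start - a->vma_start, 0);
      assert(b_start - a_start == 0x2000);  // clear [0x10000, 0x12000)
      // span.replace(b->vma_start, b->vma_end - b->vma_start, b);
      assert(b_end - b_start == 0x3000);    // install b over [0x12000, 0x15000)
      // span.replace(b->vma_end, a->vma_end - b->vma_end, 0);
      assert(a_end - b_end == 0x3000);      // clear [0x15000, 0x18000)
    }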
@@ -197,7 +217,14 @@ vmap::copy(int share)
   if(nm == 0)
     return 0;
 
-  for (range *r: cr) {
+#if VM_CRANGE
+  for (auto r: cr) {
+#endif
+#if VM_RADIX
+  for (auto r: rx) {
+    if (!r)
+      continue;
+#endif
     vma *e = (vma *) r;
     struct vma *ne;
@@ -231,10 +258,27 @@ vmap::copy(int share)
       goto err;
     }
 
+#if VM_CRANGE
     auto span = nm->cr.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
-    for (auto x __attribute__((unused)): span)
+#endif
+#if VM_RADIX
+    auto span = nm->rx.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
+#endif
+    for (auto x __attribute__((unused)): span) {
+#if VM_RADIX
+      if (!x)
+        continue;
+#endif
+      cprintf("non-empty span: %p (orig 0x%lx--0x%lx)\n",
+              x, ne->vma_start, ne->vma_end);
       assert(0); /* span must be empty */
+    }
 
+#if VM_CRANGE
     span.replace(ne);
+#endif
+#if VM_RADIX
+    span.replace(ne->vma_start, ne->vma_end-ne->vma_start, ne);
+#endif
   }
 
   if (share)
@@ -259,7 +303,13 @@ vmap::lookup(uptr start, uptr len)
   if (start + len < start)
     panic("vmap::lookup bad len");
 
-  range *r = cr.search(start, len);
+#if VM_CRANGE
+  auto r = cr.search(start, len);
+#endif
+#if VM_RADIX
+  assert(len <= PGSIZE);
+  auto r = rx.search(start);
+#endif
   if (r != 0) {
     vma *e = (vma *) r;
     if (e->vma_end <= e->vma_start)
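Note the semantic narrowing in lookup(): crange's search() probes an arbitrary [start, start+len) interval, while the radix search() shown here is a point query on the page containing start, hence the new assert(len <= PGSIZE). A self-contained sketch of that degenerate lookup, in the same toy std::map model as before (names and page size are this sketch's assumptions):

    #include <cassert>
    #include <cstdint>
    #include <map>

    constexpr uintptr_t kPageSize = 4096;   // assumption, as before

    // With VM_RADIX, lookup() reduces to: find the element (if any)
    // stored in the slot of the page containing `start`.
    void* lookup_sketch(const std::map<uintptr_t, void*>& slots,
                        uintptr_t start, uintptr_t len) {
      assert(len <= kPageSize);             // mirrors the new assert above
      auto it = slots.find(start & ~(kPageSize - 1));
      return it == slots.end() ? nullptr : it->second;
    }

    int main() {
      std::map<uintptr_t, void*> slots;
      int vma_stub;
      slots[0x7000] = &vma_stub;
      assert(lookup_sketch(slots, 0x7abc, 16) == &vma_stub);
      assert(lookup_sketch(slots, 0x8000, 16) == nullptr);
    }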
@@ -282,10 +332,20 @@ vmap::insert(vmnode *n, uptr vma_start, int dotlb)
   {
     // new scope to release the search lock before tlbflush
     u64 len = n->npages * PGSIZE;
+#if VM_CRANGE
     auto span = cr.search_lock(vma_start, len);
+#endif
+#if VM_RADIX
+    auto span = rx.search_lock(vma_start, len);
+#endif
     for (auto r: span) {
+#if VM_RADIX
+      if (!r)
+        continue;
+#endif
       vma *rvma = (vma*) r;
-      cprintf("vmap::insert: overlap with 0x%lx--0x%lx\n", rvma->vma_start, rvma->vma_end);
+      cprintf("vmap::insert: overlap with %p: 0x%lx--0x%lx\n",
+              rvma, rvma->vma_start, rvma->vma_end);
       return -1;
     }
@@ -297,7 +357,12 @@ vmap::insert(vmnode *n, uptr vma_start, int dotlb)
       return -1;
     }
 
+#if VM_CRANGE
     span.replace(e);
+#endif
+#if VM_RADIX
+    span.replace(e->vma_start, e->vma_end-e->vma_start, e);
+#endif
   }
 
   bool needtlb = false;
@@ -327,7 +392,12 @@ vmap::remove(uptr vma_start, uptr len)
     // new scope to release the search lock before tlbflush
     uptr vma_end = vma_start + len;
 
+#if VM_CRANGE
    auto span = cr.search_lock(vma_start, len);
+#endif
+#if VM_RADIX
+    auto span = rx.search_lock(vma_start, len);
+#endif
    for (auto r: span) {
       vma *rvma = (vma*) r;
       if (rvma->vma_start < vma_start || rvma->vma_end > vma_end) {
@@ -338,7 +408,12 @@ vmap::remove(uptr vma_start, uptr len)
     // XXX handle partial unmap
 
+#if VM_CRANGE
     span.replace(0);
+#endif
+#if VM_RADIX
+    span.replace(vma_start, len, 0);
+#endif
   }
 
   bool needtlb = false;