Commit e8faaaf6 authored by Nickolai Zeldovich

checkpoint:

- new crange api
- fix several crange bugs (see comments)
- better scoped_acquire

compiles but still buggy and missing tlb shootdowns
Parent 2a05dc1b
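For orientation, a minimal sketch of the new crange usage pattern, condensed from the vmap::copy and vmap::insert hunks further down; the helper name and parameters are illustrative, not part of the commit.

static void map_one(crange &cr, u64 start, u64 len, vma *e)
{
  // Old API: cr.add(start, len, (void *) e);  /  cr.del(start, len);
  // New API: lock the span, verify it is empty, then install the range as one unit.
  auto span = cr.search_lock(start, len);           // returns a crange_locked
  for (auto x __attribute__((unused)): span)
    assert(0);                                      // span must be empty
  span.replace(new range(&cr, start, len, e, 0));
}                                                   // span's destructor drops the locks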
#include "types.h"
#include "amd64.h"
#include "mmu.h"
#include "kernel.hh"
#include "spinlock.h"
#include "condvar.h"
#include "queue.h"
#include "proc.hh"
#include "kernel.hh"
#include "cpu.hh"
struct spinlock tickslock __mpalign__;
......
@@ -39,10 +39,33 @@ class scoped_acquire {
spinlock *_l;
public:
scoped_acquire(spinlock *l) : _l(l) { acquire(_l); }
~scoped_acquire() { release(_l); }
scoped_acquire(spinlock *l) : _l(0) { acquire(l); }
~scoped_acquire() { release(); }
void release() { if (_l) { ::release(_l); _l = 0; } }
void acquire(spinlock *l) { assert(!_l); ::acquire(l); _l = l; }
};
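A sketch of how the reworked guard is meant to be used; the function is illustrative, but the pattern matches vmap::pagefault and pagefault_ondemand further down, where the guard is handed to a callee so it can drop and retake the lock around a blocking demand load.

static void with_lock(spinlock *lk)
{
  scoped_acquire sa(lk);   // acquire lk
  // ... work under the lock ...
  sa.release();            // drop it early; the destructor becomes a no-op
  // ... blocking work without the lock ...
  sa.acquire(lk);          // retake it; the destructor will release it again
}                          // released here if still held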
namespace std {
template<class T>
struct remove_reference
{ typedef T type; };
template<class T>
struct remove_reference<T&>
{ typedef T type; };
template<class T>
struct remove_reference<T&&>
{ typedef T type; };
template<class T>
typename remove_reference<T>::type&&
move(T&& a)
{
return static_cast<typename remove_reference<T>::type&&>(a);
}
}
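The kernel builds freestanding and does not link libstdc++, so it supplies just enough of <utility> to enable move semantics. An illustrative sketch of why that matters, using crange_locked from crange.hh below (take_span is not part of the commit): the type is non-copyable, so returning or storing one requires casting it to an rvalue so the move constructor is selected.

static crange_locked take_span(crange *cr, u64 k, u64 sz)
{
  crange_locked span = cr->search_lock(k, sz);
  // 'span' is an lvalue; std::move casts it to an rvalue so the move
  // constructor (not the deleted copy constructor) is chosen.
  return std::move(span);
}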
/* C++ runtime */
void *operator new(unsigned long nbytes);
void *operator new(unsigned long nbytes, void *buf);
......
(diff collapsed)
@@ -86,14 +86,14 @@ struct range : public rcu_freed {
public:
u64 key;
u64 size;
void *value;
rcu_freed *value;
atomic<int> curlevel; // the current levels it appears on
int nlevel; // the number of levels this range should appear
crange *cr; // the crange this range is part of
markptr<range>* next; // one next pointer per level
spinlock *lock; // on separate cache line?
range(crange *cr, u64 k, u64 sz, void *v, range *n, int nlevel = 0);
range(crange *cr, u64 k, u64 sz, rcu_freed *v, range *n, int nlevel = 0);
~range();
virtual void do_gc() {
delete this;
@@ -111,28 +111,32 @@ class range_iterator {
public:
range_iterator(range *e) : _e(e) {}
range_iterator& operator++() { _e = _e->next[0].ptr(); return *this; }
range*& operator*() { return _e; }
bool operator==(const range_iterator &other) { return _e == other._e; }
bool operator!=(const range_iterator &other) { return _e != other._e; }
};
class crange_locked;
struct crange {
private:
range *crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
public:
int nlevel; // number of levels in the crange skip list
crange(int nlevel);
~crange(void);
void del(u64 k, u64 sz);
void add(u64 k, u64 sz, void *v);
range* search(u64 k, u64 sz, int mod = 0);
void print(int);
void check(struct range *absent);
int del_index(range *p0, range **e, int l);
void add_index(int l, range *e, range *p1, markptr<range> s1);
int lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr);
int find_and_lock(u64 k, u64 sz, range **p0, range **f0, range **l0, range **s0);
friend class range;
public:
int nlevel; // number of levels in the crange skip list
crange(int nlevel);
~crange(void);
range* search(u64 k, u64 sz);
crange_locked search_lock(u64 k, u64 sz);
range_iterator begin() const { return range_iterator(crange_head->next[0].ptr()); };
range_iterator end() const { return range_iterator(0); };
@@ -149,3 +153,38 @@ end(const crange &cr)
{
return cr.end();
}
struct crange_locked {
private:
crange *cr_;
u64 base_, size_;
range *prev_, *first_, *last_, *succ_;
scoped_gc_epoch gc;
crange_locked(crange *cr, u64 base, u64 size, range *p, range *f, range *l, range *s);
friend class crange;
crange_locked(const crange_locked&) = delete;
crange_locked& operator=(const crange_locked&) = delete;
public:
crange_locked(crange_locked &&x);
~crange_locked();
range_iterator begin() const { return range_iterator(first_); };
range_iterator end() const { return range_iterator(succ_); };
void replace(range *r);
};
static inline range_iterator
begin(const crange_locked &crl)
{
return crl.begin();
}
static inline range_iterator
end(const crange_locked &crl)
{
return crl.end();
}
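With the member begin()/end() plus these free helpers, both crange and crange_locked work in range-based for loops, which is how vm.cc below walks them. A minimal sketch under that assumption; dump_ranges is illustrative only.

static void dump_ranges(crange &cr, u64 k, u64 sz)
{
  for (range *r : cr)                      // whole skip list at level 0 (caller holds a gc epoch)
    cprintf("range [%lx, %lx)\n", r->key, r->key + r->size);

  crange_locked span = cr.search_lock(k, sz);
  for (range *r : span)                    // only the locked span covering [k, k+sz)
    cprintf("locked [%lx, %lx)\n", r->key, r->key + r->size);
}                                          // span unlocks when it goes out of scope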
#include "types.h"
#include "mmu.h"
#include "kernel.hh"
#include "spinlock.h"
#include "condvar.h"
#include "queue.h"
#include "proc.hh"
#include "kernel.hh"
#include "amd64.h"
#include "stat.h"
#include "fs.h"
......
#include "types.h"
#include "kernel.hh"
#include "spinlock.h"
#include "condvar.h"
#include "kernel.hh"
#include "fs.h"
#include "file.hh"
#include "stat.h"
......
@@ -23,8 +23,17 @@ void gc_begin_epoch();
void gc_end_epoch();
class scoped_gc_epoch {
private:
bool valid;
public:
scoped_gc_epoch() { gc_begin_epoch(); }
~scoped_gc_epoch() { gc_end_epoch(); }
scoped_gc_epoch() { valid = true; gc_begin_epoch(); }
~scoped_gc_epoch() { if (valid) gc_end_epoch(); }
scoped_gc_epoch(const scoped_gc_epoch&) = delete;
scoped_gc_epoch(scoped_gc_epoch &&other) {
valid = other.valid;
other.valid = false;
}
};
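A sketch of what the move constructor buys: crange_locked (in crange.hh) embeds a scoped_gc_epoch, so moving a crange_locked must transfer the epoch rather than end it twice. The function below is illustrative only.

static void epoch_move_example()
{
  scoped_gc_epoch a;               // gc_begin_epoch()
  scoped_gc_epoch b(std::move(a)); // b owns the epoch; a.valid is now false
  // ~b runs gc_end_epoch() exactly once; ~a is a no-op
}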
#pragma once
extern "C" {
#include "kern_c.h"
}
......
#ifdef LWIP
extern "C" {
#include "lwip/tcp_impl.h"
#include "lwip/tcpip.h"
#include "lwip/ip.h"
#include "lwip/netif.h"
#include "lwip/dhcp.h"
#include "lwip/sockets.h"
#include "netif/etharp.h"
}
#endif
#include "types.h"
#include "kernel.hh"
#include "queue.h"
@@ -21,6 +9,16 @@ extern "C" {
#include "net.hh"
#ifdef LWIP
extern "C" {
#include "lwip/tcp_impl.h"
#include "lwip/tcpip.h"
#include "lwip/ip.h"
#include "lwip/netif.h"
#include "lwip/dhcp.h"
#include "lwip/sockets.h"
#include "netif/etharp.h"
}
err_t if_init(struct netif *netif);
void if_input(struct netif *netif, void *buf, u16 len);
#endif
......
#include "types.h"
#include "kernel.hh"
#include "spinlock.h"
#include "condvar.h"
#include "fs.h"
#include "kernel.hh"
#include "stat.h"
#include "kalloc.h"
#include "file.hh"
......
@@ -176,11 +176,6 @@ vmap::vmap()
vmap::~vmap()
{
for (range *r: cr) {
delete (vma*) r->value;
cr.del(r->key, r->size);
}
if (kshared)
ksfree(slab_kshared, kshared);
if (pml4)
@@ -228,7 +223,10 @@ vmap::copy(int share)
goto err;
ne->n->ref++;
nm->cr.add(ne->vma_start, ne->vma_end - ne->vma_start, (void *) ne);
auto span = nm->cr.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
for (auto x __attribute__((unused)): span)
assert(0); /* span must be empty */
span.replace(new range(&nm->cr, ne->vma_start, ne->vma_end - ne->vma_start, ne, 0));
}
if (share)
@@ -271,11 +269,14 @@ vmap::insert(vmnode *n, uptr vma_start)
scoped_acquire sa(&lock);
u64 len = n->npages * PGSIZE;
if (lookup(vma_start, len)) {
cprintf("vmap_insert: overlap\n");
auto span = cr.search_lock(vma_start, len);
for (auto x __attribute__((unused)): span) {
cprintf("vmap::insert: overlap\n");
return -1;
}
// XXX handle overlaps
vma *e = new vma();
if (e == 0)
return -1;
@@ -284,7 +285,10 @@ vmap::insert(vmnode *n, uptr vma_start)
e->vma_end = vma_start + len;
e->n = n;
n->ref++;
cr.add(e->vma_start, len, (void *) e);
span.replace(new range(&cr, vma_start, len, e, 0));
// XXX shootdown
return 0;
}
@@ -293,16 +297,21 @@ vmap::remove(uptr vma_start, uptr len)
{
scoped_acquire sa(&lock);
uptr vma_end = vma_start + len;
struct range *r = cr.search(vma_start, len);
if (r == 0)
panic("no vma?");
struct vma *e = (struct vma *) r->value;
if (e->vma_start != vma_start || e->vma_end != vma_end) {
cprintf("vmap_remove: partial unmap unsupported\n");
return -1;
auto span = cr.search_lock(vma_start, len);
for (auto x: span) {
if (x->key < vma_start || x->key + x->size > vma_end) {
cprintf("vmap::remove: partial unmap not supported\n");
return -1;
}
}
cr.del(vma_start, len);
gc_delayed(e);
// XXX handle partial unmap
span.replace(0);
// XXX shootdown
return 0;
}
@@ -311,17 +320,17 @@ vmap::remove(uptr vma_start, uptr len)
*/
vma *
vmap::pagefault_ondemand(uptr va, u32 err, vma *m)
vmap::pagefault_ondemand(uptr va, u32 err, vma *m, scoped_acquire *mlock)
{
if (m->n->allocpg() < 0)
panic("pagefault: couldn't allocate pages");
release(&m->lock);
mlock->release();
if (m->n->demand_load() < 0)
panic("pagefault: couldn't load");
m = lookup(va, 1);
if (!m)
panic("pagefault_ondemand");
acquire(&m->lock); // re-acquire lock on m
mlock->acquire(&m->lock); // re-acquire lock on m
return m;
}
@@ -363,21 +372,19 @@ vmap::pagefault(uptr va, u32 err)
if (m == 0)
return -1;
acquire(&m->lock);
scoped_acquire mlock(&m->lock);
u64 npg = (PGROUNDDOWN(va) - m->vma_start) / PGSIZE;
if (m->n && m->n->type == ONDEMAND && m->n->page[npg] == 0)
m = pagefault_ondemand(va, err, m);
m = pagefault_ondemand(va, err, m, &mlock);
if (vm_debug)
cprintf("pagefault: err 0x%x va 0x%lx type %d ref %lu pid %d\n",
err, va, m->va_type, m->n->ref.load(), myproc()->pid);
if (m->va_type == COW && (err & FEC_WR)) {
if (pagefault_wcow(va, pte, m, npg) < 0) {
release(&m->lock);
if (pagefault_wcow(va, pte, m, npg) < 0)
return -1;
}
} else if (m->va_type == COW) {
*pte = v2p(m->n->page[npg]) | PTE_P | PTE_U | PTE_COW;
} else {
@@ -388,7 +395,6 @@ vmap::pagefault(uptr va, u32 err)
// XXX(sbw) Why reload hardware page tables?
lcr3(v2p(pml4)); // Reload hardware page tables
release(&m->lock);
return 1;
}
......
#include "gc.hh"
#include "atomic.hh"
#include "crange.hh"
#include "cpputil.hh"
using std::atomic;
@@ -68,6 +69,6 @@ struct vmap {
int copyout(uptr va, void *p, u64 len);
private:
vma* pagefault_ondemand(uptr va, u32 err, vma *m);
vma* pagefault_ondemand(uptr va, u32 err, vma *m, scoped_acquire *mlock);
int pagefault_wcow(uptr va, pme_t *pte, vma *m, u64 npg);
};