Commit 406778a3 authored by Silas Boyd-Wickizer

Merge branch 'scale-amd64' of git+ssh://amsterdam.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

@@ -1741,6 +1741,24 @@ unmappedtest(void)
   printf("unmappedtest ok\n");
 }
 
+static int nenabled;
+static char **enabled;
+
+void
+run_test(const char *name, void (*test)())
+{
+  if (!nenabled) {
+    test();
+  } else {
+    for (int i = 0; i < nenabled; i++) {
+      if (strcmp(name, enabled[i]) == 0) {
+        test();
+        break;
+      }
+    }
+  }
+}
+
 int
 main(int argc, char *argv[])
 {
@@ -1752,47 +1770,52 @@ main(int argc, char *argv[])
   }
   close(open("usertests.ran", O_CREATE));
 
-  unopentest();
-  bigargtest();
-  bsstest();
-  sbrktest();
+  nenabled = argc - 1;
+  enabled = argv + 1;
+
+#define TEST(name) run_test(#name, name)
+  TEST(unopentest);
+  TEST(bigargtest);
+  TEST(bsstest);
+  TEST(sbrktest);
   // we should be able to grow a user process to consume all phys mem
-  unmappedtest();
-  validatetest();
-  opentest();
-  writetest();
-  writetest1();
-  createtest();
-  preads();
-  // mem();
-  pipe1();
-  preempt();
-  exitwait();
-  rmdot();
-  thirteen();
-  longname();
-  bigfile();
-  subdir();
-  concreate();
-  linktest();
-  unlinkread();
-  createdelete();
-  twofiles();
-  sharedfd();
-  dirfile();
-  iref();
-  forktest();
-  bigdir(); // slow
-  tls_test();
-  thrtest();
-  ftabletest();
-  exectest();
+  TEST(unmappedtest);
+  TEST(validatetest);
+  TEST(opentest);
+  TEST(writetest);
+  TEST(writetest1);
+  TEST(createtest);
+  TEST(preads);
+  // TEST(mem);
+  TEST(pipe1);
+  TEST(preempt);
+  TEST(exitwait);
+  TEST(rmdot);
+  TEST(thirteen);
+  TEST(longname);
+  TEST(bigfile);
+  TEST(subdir);
+  TEST(concreate);
+  TEST(linktest);
+  TEST(unlinkread);
+  TEST(createdelete);
+  TEST(twofiles);
+  TEST(sharedfd);
+  TEST(dirfile);
+  TEST(iref);
+  TEST(forktest);
+  TEST(bigdir); // slow
+  TEST(tls_test);
+  TEST(thrtest);
+  TEST(ftabletest);
+  TEST(exectest);
 
   exit();
 }
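The new run_test()/TEST() pair lets usertests run either every test or only the tests named on the command line. The following is a minimal host-side sketch of that selection logic; it is not part of the xv6 build, and the sbrktest/pipe1 stubs and use of <cstdio> are illustrative stand-ins only.

// Host-side sketch (assumed standalone program, not the xv6 build) of the
// TEST()/run_test() selection logic introduced in the hunk above.
#include <cstdio>
#include <cstring>

static int nenabled;     // number of test names given on the command line
static char **enabled;   // argv+1: the selected test names

static void
run_test(const char *name, void (*test)())
{
  if (!nenabled) {                              // no arguments: run everything
    test();
    return;
  }
  for (int i = 0; i < nenabled; i++)
    if (std::strcmp(name, enabled[i]) == 0) {   // run only tests named in argv
      test();
      break;
    }
}

#define TEST(name) run_test(#name, name)        // #name stringizes the identifier

static void sbrktest() { std::puts("sbrktest ok"); }   // illustrative stubs
static void pipe1()    { std::puts("pipe1 ok"); }

int
main(int argc, char *argv[])
{
  nenabled = argc - 1;    // e.g. "./usertests pipe1" runs only pipe1
  enabled = argv + 1;
  TEST(sbrktest);
  TEST(pipe1);
  return 0;
}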
 #pragma once
 
-template<class A, class B>
-class pair {
- public:
-  A _a;
-  B _b;
-
-  pair(const A &a, const B &b) : _a(a), _b(b) {}
-
-  bool operator==(const pair<A, B> &other) {
-    return _a == other._a && _b == other._b;
-  }
-};
+#include <type_traits>
+#include <utility>
+
+using std::pair;
+using std::make_pair;
 
 template<int N>
 class strbuf {
@@ -27,13 +20,6 @@ class strbuf {
   }
 };
 
-template<class A, class B>
-pair<A, B>
-mkpair(const A &a, const B &b)
-{
-  return pair<A, B>(a, b);
-}
-
 class scoped_acquire {
  private:
   spinlock *_l;
@@ -48,25 +34,6 @@ class scoped_acquire {
 class retryable {};
 
 namespace std {
-template<class T>
-struct remove_reference
-{ typedef T type; };
-
-template<class T>
-struct remove_reference<T&>
-{ typedef T type; };
-
-template<class T>
-struct remove_reference<T&&>
-{ typedef T type; };
-
-template<class T>
-typename remove_reference<T>::type&&
-move(T&& a)
-{
-  return static_cast<typename remove_reference<T>::type&&>(a);
-}
-
 struct ostream { int next_width; };
 extern ostream cout;
...
@@ -5,12 +5,145 @@
  */
 
 #include "gc.hh"
-#include "markptr.hh"
 
-enum { bits_per_level = 9 };
+enum { bits_per_level = 6 };
 enum { key_bits = 36 };
 enum { radix_levels = (key_bits + bits_per_level - 1) / bits_per_level };
+class radix_elem;
+class radix_node;
+
+/*
+ * Each pointer to a radix_elem or radix_node can be in one of four
+ * states:
+ *
+ * - pointer to radix_node
+ * - unlocked leaf
+ * - locked leaf
+ * - dead leaf
+ *
+ * A leaf is either a pointer to a radix_elem or null.
+ *
+ * Before making semantic modifications to a range, the range must be
+ * locked. This is done by locking the leaf pointers (be they to
+ * radix_entry or null) corresponding to that range. If necessary, a
+ * leaf may be "pushed down" and replaced with a pointer to a radix_node
+ * full of the old value to get the endpoints accurate. Locking NEVER
+ * happens at a higher level than the current set of leaves.
+ *
+ * We assume that a thread attempting to push down a leaf is doing
+ * so to lock it.
+ *
+ * When replacing a range, we'd like to possibly retire old
+ * radix_nodes when their contents are all set to be the same. Before
+ * doing this, all leaves under that radix_node must be locked. We
+ * transition them to the 'dead leaf' state. This informs all others
+ * attempting to lock the pointer to retry. The radix_node itself is
+ * RCU-freed. To avoid restarting writers, set the leaves to the right
+ * value too. Replaced elements are written in locked state, to be
+ * unlocked when the radix_range goes away.
+ *
+ * Once a pointer is dead, it stays dead until the containing
+ * radix_node is deallocated. Dead pointers do not own references.
+ *
+ * For now we do not implement the dead state. It is only necessary
+ * when collapsing an already-expanded node. It's unclear this
+ * optimization is very useful, as it requires RCU-freeing radix_nodes,
+ * which makes them just over a power of 2 and inefficient to
+ * allocate.
+ *
+ * Races:
+ *
+ * - If a leaf to be locked (or pushed down) gets pushed down, lock
+ *   the new radix_node at a more granular level.
+ *
+ * - If a leaf to be locked (or pushed down) goes dead, restart
+ *   everything from the root. Many values may have gone invalid.
+ *
+ * - If a leaf to be locked (or pushed down) gets locked, spin.
+ *
+ * [*] XXX: Try not to bounce on the radix_elem refcount too much.
+ */
+
+enum entry_state {
+  entry_unlocked = 0,
+  entry_locked = 1,
+  // entry_dead = 2,
+  entry_node = 2,
+  entry_mask = 3
+};
+
+class radix_entry {
+ public:
+  radix_entry()
+    : value_(0 | entry_unlocked) { }
+  explicit radix_entry(uptr value)
+    : value_(value) { }
+  explicit radix_entry(radix_node *ptr)
+    : value_(reinterpret_cast<uptr>(ptr) | entry_node) {
+    // XXX: This is kinda wonky. Maybe switch the status to
+    // entry_unlocked if ptr is null, and make null pass both is_elem()
+    // and is_node().
+    assert(ptr != nullptr);
+  }
+  explicit radix_entry(radix_elem *ptr, entry_state state = entry_unlocked)
+    : value_(reinterpret_cast<uptr>(ptr) | state) {
+    assert(state != entry_node);
+  }
+  explicit radix_entry(decltype(nullptr) nullp,
+                       entry_state state = entry_unlocked)
+    : value_(0 | state) {
+    assert(state != entry_node);
+  }
+
+  uptr value() const { return value_; }
+  uptr& value() { return value_; }
+  entry_state state() const {
+    return static_cast<entry_state>(value_ & entry_mask);
+  }
+  uptr ptr() const { return value_ & ~entry_mask; }
+
+  bool is_node() const { return state() == entry_node; }
+  bool is_elem() const { return !is_node(); }
+  bool is_null() const { return ptr() == 0; }
+
+  // Convenience function
+  radix_entry with_state(entry_state state) {
+    return radix_entry(elem(), state);
+  }
+
+  radix_elem *elem() const {
+    assert(is_elem());
+    return reinterpret_cast<radix_elem*>(ptr());
+  }
+  radix_node *node() const {
+    assert(is_node());
+    return reinterpret_cast<radix_node*>(ptr());
+  }
+
+  void release();
+
+ private:
+  uptr value_;
+};
+
+// Our version of std::atomic doesn't work for structs, even if they
+// are integer sized.
+class radix_ptr {
+ public:
+  radix_ptr() : ptr_(radix_entry().value()) { }
+  radix_ptr(radix_entry e) : ptr_(e.value()) { }
+  radix_entry load() const { return radix_entry(ptr_.load()); }
+  void store(radix_entry e) { ptr_.store(e.value()); }
+  bool compare_exchange_weak(radix_entry &old, radix_entry val) {
+    return ptr_.compare_exchange_weak(old.value(), val.value());
+  }
+
+ private:
+  static_assert(sizeof(uptr) == sizeof(radix_entry),
+                "radix_entry is a uptr");
+  std::atomic<uptr> ptr_;
+};
 class radix_elem : public rcu_freed {
  private:
   bool deleted_;
@@ -19,19 +152,29 @@ class radix_elem : public rcu_freed {
  public:
   radix_elem() : rcu_freed("radix_elem"), deleted_(false), ref_(0) {}
   bool deleted() { return deleted_; }
-  void decref() { if (--ref_ == 0) { deleted_ = true; gc_delayed(this); } }
-  void incref() { ref_++; }
+  void decref(u64 delta = 1) {
+    if ((ref_ -= delta) == 0) {
+      deleted_ = true;
+      gc_delayed(this);
+    }
+  }
+  void incref(u64 delta = 1) { ref_ += delta; }
 };
 
 struct radix_node {
-  markptr<void> ptr[1 << bits_per_level];
+  radix_ptr child[1 << bits_per_level];
 
-  radix_node() {
-    for (int i = 0; i < sizeof(ptr) / sizeof(ptr[0]); i++)
-      ptr[i] = 0;
-  }
+  radix_node() { }
+  ~radix_node();
 
   NEW_DELETE_OPS(radix_node)
 };
 
+// Assert we have enough spare bits for all flags.
+static_assert(alignof(radix_node) > entry_mask,
+              "radix_node sufficiently aligned");
+static_assert(alignof(radix_elem) > entry_mask,
+              "radix_elem sufficiently aligned");
+
 struct radix;
 
 struct radix_range {
@@ -50,58 +193,43 @@ struct radix_range {
 };
 
 struct radix {
-  markptr<void> root_;
+  radix_ptr root_;
   u32 shift_;
 
-  radix(u32 shift) : root_(0), shift_(shift) {
-    root_.ptr() = new radix_node();
+  radix(u32 shift) : root_(radix_entry(new radix_node())), shift_(shift) {
   }
+  ~radix();
 
   radix_elem* search(u64 key);
   radix_range search_lock(u64 start, u64 size);
 
-  // k is shifted value.
-  u64 skip_empty(u64 k) const;
-
   NEW_DELETE_OPS(radix)
 };
 
 struct radix_iterator {
   const radix* r_;
   u64 k_;
-
-  radix_iterator(const radix* r, u64 k) : r_(r), k_(r->skip_empty(k)) {}
-
-  radix_iterator &operator++() { k_++; k_ = r_->skip_empty(k_); return *this; }
-  radix_elem* operator*();
-
-  bool operator==(const radix_iterator &other) {
-    return r_ == other.r_ && k_ == other.k_; }
-  bool operator!=(const radix_iterator &other) {
-    return r_ != other.r_ || k_ != other.k_; }
-};
-
-struct radix_iterator2 {
-  const radix* r_;
-  u64 k_;
   // path_[i] is the node at level i. Note that the leaf is at zero
   // and is radix_elem. The rest are radix_node. For now we assume all
   // leaves are at level 0. Later we'll steal a bit for them. The root
   // is path_[radix_levels].
-  void *path_[radix_levels+1];
+  radix_entry path_[radix_levels+1];
+  u32 leaf_;
 
-  radix_iterator2(const radix* r, u64 k);
+  radix_iterator(const radix* r, u64 k);
 
-  radix_iterator2 &operator++() {
+  radix_iterator &operator++() {
     if (!advance(radix_levels-1)) k_ = ~0ULL;
     return *this;
   }
   radix_elem* operator*() {
-    return (radix_elem*)path_[0];
+    return path_[leaf_].elem();
   }
-  radix_node* node(u32 level) { return (radix_node*)path_[level]; }
+  radix_node* node(u32 level) { return path_[level].node(); }
 
   // Compare equality on just the key.
-  bool operator==(const radix_iterator2 &other) {
+  bool operator==(const radix_iterator &other) {
     return r_ == other.r_ && k_ == other.k_; }
-  bool operator!=(const radix_iterator2 &other) {
+  bool operator!=(const radix_iterator &other) {
     return r_ != other.r_ || k_ != other.k_; }
 
 private:
@@ -109,8 +237,6 @@ private:
   bool advance(u32 level);
 };
 
-#define radix_iterator radix_iterator2
-
 static inline radix_iterator
 begin(const radix &r) { return radix_iterator(&r, 0); }
@@ -123,5 +249,3 @@ begin(const radix_range &rr) { return radix_iterator(rr.r_, rr.start_); }
 static inline radix_iterator
 end(const radix_range &rr) { return radix_iterator(rr.r_, rr.start_ + rr.size_); }
-
-#undef radix_iterator
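The header above only declares the tagged-pointer machinery; the push-down and locking logic itself lives in the .cc file, which is not part of this hunk. As a reading aid, here is a hedged sketch (not the commit's code) of how one leaf slot could be locked with the radix_ptr/radix_entry API, following the protocol in the comment: spin while the slot is locked, descend if it has been pushed down to a radix_node, and otherwise CAS the state bits from unlocked to locked.

// Sketch only: assumes the radix.hh declarations above; this is not the
// implementation shipped by the commit.
static bool
try_lock_leaf(radix_ptr *slot)
{
  for (;;) {
    radix_entry cur = slot->load();
    if (cur.is_node())
      return false;               // pushed down: caller must descend and
                                  // lock at a more granular level
    if (cur.state() == entry_locked)
      continue;                   // another thread holds the leaf: spin
    // Flip unlocked -> locked in the low tag bits; on failure some other
    // thread changed the slot, so reload and retry.
    if (slot->compare_exchange_weak(cur, cur.with_state(entry_locked)))
      return true;
  }
}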
@@ -72,11 +72,11 @@ struct vma
 // The elements of e[] are not ordered by address.
 struct vmap {
 #if VM_CRANGE
-  struct crange cr;
+  struct crange vmas;
 #endif
 #if VM_RADIX
-  struct radix rx;
+  struct radix vmas;
 #endif
 
   static vmap* alloc();
...
@@ -32,7 +32,7 @@
 u64
 bio_hash(const pair<u32, u64> &p)
 {
-  return p._a ^ p._b;
+  return p.first ^ p.second;
 }
 
 static xns<pair<u32, u64>, buf*, bio_hash> *bufns;
@@ -51,7 +51,7 @@ bget(u32 dev, u64 sector, int *writer)
  loop:
   // Try for cached block.
   // XXX ignore dev
-  b = bufns->lookup(mkpair(dev, sector));
+  b = bufns->lookup(make_pair(dev, sector));
   if (b) {
     if (b->dev != dev || b->sector != sector)
       panic("block mismatch");
@@ -76,7 +76,7 @@ bget(u32 dev, u64 sector, int *writer)
   b = new buf(dev, sector);
   b->flags = B_BUSY;
   *writer = 1;
-  if (bufns->insert(mkpair(b->dev, b->sector), b) < 0) {
+  if (bufns->insert(make_pair(b->dev, b->sector), b) < 0) {
     gc_delayed(b);
     goto loop;
   }
...
@@ -152,7 +152,7 @@ bfree(int dev, u64 x)
 u64
 ino_hash(const pair<u32, u32> &p)
 {
-  return p._a ^ p._b;
+  return p.first ^ p.second;
 }
 
 static xns<pair<u32, u32>, inode*, ino_hash> *ins;
@@ -268,7 +268,7 @@ igetnoref(u32 dev, u32 inum)
   // Try for cached inode.
   {
     scoped_gc_epoch e;
-    struct inode *ip = ins->lookup(mkpair(dev, inum));
+    struct inode *ip = ins->lookup(make_pair(dev, inum));
     if (ip) {
       if (!(ip->flags & I_VALID)) {
         acquire(&ip->lock);
@@ -290,7 +290,7 @@ igetnoref(u32 dev, u32 inum)
   snprintf(ip->lockname, sizeof(ip->lockname), "cv:ino:%d", ip->inum);
   initlock(&ip->lock, ip->lockname+3, LOCKSTAT_FS);
   initcondvar(&ip->cv, ip->lockname);
-  if (ins->insert(mkpair(ip->dev, ip->inum), ip) < 0) {
+  if (ins->insert(make_pair(ip->dev, ip->inum), ip) < 0) {
     gc_delayed(ip);
     goto retry;
   }
@@ -399,7 +399,7 @@ iput(struct inode *ip)
   ip->gen += 1;
   iupdate(ip);
-  ins->remove(mkpair(ip->dev, ip->inum), &ip);
+  ins->remove(make_pair(ip->dev, ip->inum), &ip);
   gc_delayed(ip);
   icache_free[mycpu()->id].x++;
   return;
...
[diff collapsed]
@@ -168,7 +168,7 @@ vmnode::loadall()
 vma::vma(vmap *vmap, uptr start, uptr end, enum vmatype vtype, vmnode *vmn) :
 #if VM_CRANGE
-  range(&vmap->cr, start, end-start),
+  range(&vmap->vmas, start, end-start),
 #endif
   vma_start(start), vma_end(end), va_type(vtype), n(vmn)
 {
@@ -194,10 +194,10 @@ vmap::alloc(void)
 vmap::vmap() :
 #if VM_CRANGE
-  cr(10),
+  vmas(10),
 #endif
 #if VM_RADIX
-  rx(PGSHIFT),
+  vmas(PGSHIFT),
 #endif
   ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared)),
   brk_(0)
@@ -253,12 +253,7 @@ vmap::incref()
 bool
 vmap::replace_vma(vma *a, vma *b)
 {
-#if VM_CRANGE
-  auto span = cr.search_lock(a->vma_start, a->vma_end - a->vma_start);
-#endif
-#if VM_RADIX
-  auto span = rx.search_lock(a->vma_start, a->vma_end - a->vma_start);
-#endif
+  auto span = vmas.search_lock(a->vma_start, a->vma_end - a->vma_start);
   if (a->deleted())
     return false;
   for (auto e: span)
@@ -279,12 +274,11 @@ vmap::copy(int share)
 {
   vmap *nm = new vmap();
-#if VM_CRANGE
-  for (auto r: cr) {
-#endif
 #if VM_RADIX
   void *last = 0;
-  for (auto r: rx) {
+#endif
+  for (auto r: vmas) {
+#if VM_RADIX
     if (!r || r == last)
       continue;
     last = r;
@@ -318,12 +312,7 @@ vmap::copy(int share)
       ne = new vma(nm, e->vma_start, e->vma_end, PRIVATE, e->n->copy());
     }
 
-#if VM_CRANGE
-    auto span = nm->cr.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
-#endif
-#if VM_RADIX
-    auto span = nm->rx.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
-#endif
+    auto span = nm->vmas.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
     for (auto x: span) {
 #if VM_RADIX
       if (!x)
@@ -367,11 +356,11 @@ vmap::lookup(uptr start, uptr len)
     panic("vmap::lookup bad len");
 
 #if VM_CRANGE
-  auto r = cr.search(start, len);
+  auto r = vmas.search(start, len);
 #endif
 #if VM_RADIX
   assert(len <= PGSIZE);
-  auto r = rx.search(start);
+  auto r = vmas.search(start);
 #endif
   if (r != 0) {
     vma *e = (vma *) r;
@@ -405,12 +394,7 @@ again:
   {
     // new scope to release the search lock before tlbflush
     u64 len = n->npages * PGSIZE;
-#if VM_CRANGE
-    auto span = cr.search_lock(vma_start, len);
-#endif
-#if VM_RADIX
-    auto span = rx.search_lock(vma_start, len);
-#endif
+    auto span = vmas.search_lock(vma_start, len);
     for (auto r: span) {
 #if VM_RADIX
       if (!r)
@@ -474,12 +458,7 @@ vmap::remove(uptr vma_start, uptr len)
     // new scope to release the search lock before tlbflush
     uptr vma_end = vma_start + len;
-#if VM_CRANGE
-    auto span = cr.search_lock(vma_start, len);
-#endif
-#if VM_RADIX
-    auto span = rx.search_lock(vma_start, len);
-#endif
+    auto span = vmas.search_lock(vma_start, len);
     for (auto r: span) {
       vma *rvma = (vma*) r;
       if (rvma->vma_start < vma_start || rvma->vma_end > vma_end) {
@@ -756,17 +735,13 @@ vmap::sbrk(ssize_t n, uptr *addr)
   s64 newn = PGROUNDUP(n + curbrk - newstart);
 #if VM_CRANGE
   range *prev = 0;
-  auto span = cr.search_lock(newstart, newn + PGSIZE);
 #endif
 #if VM_RADIX
-  auto span = rx.search_lock(newstart, newn + PGSIZE);
+  void *last = 0;
 #endif
-#if VM_CRANGE
+  auto span = vmas.search_lock(newstart, newn + PGSIZE);
   for (auto r: span) {
-#endif
 #if VM_RADIX
-  void *last = 0;
-  for (auto r: span) {
     if (!r || r == last)
       continue;
     last = r;
@@ -824,7 +799,7 @@ vmap::unmapped_area(size_t npages)
   while (addr < USERTOP) {
 #if VM_CRANGE
-    auto x = cr.search(addr, n);
+    auto x = vmas.search(addr, n);
     if (x == nullptr)
       return addr;
     vma* a = (vma*) x;
@@ -834,7 +809,7 @@ vmap::unmapped_area(size_t npages)
 #if VM_RADIX
     bool overlap = false;
     for (uptr ax = addr; ax < addr+n; ax += PGSIZE) {
-      auto x = rx.search(ax);
+      auto x = vmas.search(ax);
       if (x != nullptr) {
         overlap = true;
         vma* a = (vma*) x;
...
// -*- c++ -*-
#pragma once
namespace std {
template<class T>
struct remove_reference
{ typedef T type; };
template<class T>
struct remove_reference<T&>
{ typedef T type; };
template<class T>
struct remove_reference<T&&>
{ typedef T type; };
}
// -*- c++ -*-
#pragma once
#include <type_traits>
namespace std {
template<class T>
typename remove_reference<T>::type&&
move(T&& a)
{
return static_cast<typename remove_reference<T>::type&&>(a);
}
template<class A, class B>
struct pair {
typedef A first_type;
typedef B second_type;
A first;
B second;
pair(const pair&) = default;
pair(pair&&) = default;
constexpr pair() : first(), second() {}
pair(const A &a, const B &b) : first(a), second(b) {}
bool operator==(const pair<A, B> &other) {
return first == other.first && second == other.second;
}
};
template<class A, class B>
pair<A, B>
make_pair(const A &a, const B &b)
{
return pair<A, B>(a, b);
}
}
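The two new headers above provide just enough of <type_traits> and <utility> (remove_reference, move, pair, make_pair) for the kernel to drop its homegrown pair/mkpair. As a quick illustration, the following hedged, standalone example (plain unsigned types instead of u32/u64, hosted <cstdio> just for output) exercises the pair interface the same way the hash functions in the buffer-cache hunk above do.

// Standalone usage sketch of the pair/make_pair interface shown above; it
// compiles equally well against the real <utility>.
#include <cstdio>
#include <utility>

// Key hashing in the style of bio_hash/ino_hash above (types simplified).
static unsigned long
key_hash(const std::pair<unsigned, unsigned long> &p)
{
  return p.first ^ p.second;
}

int
main()
{
  auto key = std::make_pair(1u, 12345ul);   // a (dev, sector)-style key
  bool same = (key == std::make_pair(1u, 12345ul));
  std::printf("hash=%lu equal=%d\n", key_hash(key), same ? 1 : 0);
  return 0;
}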