Commit ea984aa9 authored by Silas Boyd-Wickizer

Merge branch 'scale-amd64' of git+ssh://amsterdam.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

#pragma once
#define _GLIBCXX_VISIBILITY(x)
#define _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define _GLIBCXX_END_NAMESPACE_VERSION
#define _GLIBCXX_BEGIN_EXTERN_C extern "C" {
#define _GLIBCXX_END_EXTERN_C }
#define __glibcxx_assert(x)
#include "atomic_base.h"
#include "atomic_2.h"
// Kernel-local replacement for std::atomic<T>: a thin wrapper over
// libstdc++'s always-lock-free __atomic2::__atomic_base<T> that
// re-exposes only its conversion (load) and assignment (store)
// operators.  Copying is deleted, matching the std::atomic contract.
template<class T>
struct atomic : public std::__atomic2::__atomic_base<T> {
atomic() = default;   // value starts uninitialized, as with std::atomic
~atomic() = default;
atomic(const atomic&) = delete;                     // atomics are not copyable
atomic& operator=(const atomic&) = delete;
atomic& operator=(const atomic&) volatile = delete;
constexpr atomic(T v) : std::__atomic2::__atomic_base<T>(v) {}  // non-atomic init
using std::__atomic2::__atomic_base<T>::operator T;  // atomic load via conversion
using std::__atomic2::__atomic_base<T>::operator=;   // atomic store via assignment
};
[Diff collapsed.]
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1
#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
/**
* @defgroup atomics Atomics
*
* Components for performing atomic operations.
* @{
*/
/// Enumeration for memory_order
typedef enum memory_order
{
  memory_order_relaxed,
  memory_order_consume,
  memory_order_acquire,
  memory_order_release,
  memory_order_acq_rel,
  memory_order_seq_cst
} memory_order;

// Strip the store-side (release) component of an ordering constraint,
// yielding the ordering to use for a plain load: release weakens to
// relaxed, acq_rel weakens to acquire, and every other ordering passes
// through unchanged.
inline memory_order
__calculate_memory_order(memory_order __m)
{
  switch (__m)
    {
    case memory_order_release:
      return memory_order_relaxed;
    case memory_order_acq_rel:
      return memory_order_acquire;
    default:
      return __m;
    }
}
// Full memory fence ordering operations across threads (defined elsewhere).
void
atomic_thread_fence(memory_order);
// Compiler-only fence w.r.t. a signal handler running in the same thread.
void
atomic_signal_fence(memory_order);
/// kill_dependency
///
/// Returns @a __y through a fresh local copy, so that a
/// memory_order_consume dependency chain rooted in the argument does
/// not extend into the caller's subsequent uses of the result.
template<typename _Tp>
inline _Tp
kill_dependency(_Tp __y)
{
  _Tp __copy(__y);
  return __copy;
}
/**
* @brief Base type for atomic_flag.
*
* Base type is POD with data, allowing atomic_flag to derive from
* it and meet the standard layout type requirement. In addition to
* compatibilty with a C interface, this allows different
* implementations of atomic_flag to use the same atomic operation
* functions, via a standard conversion to the __atomic_flag_base
* argument.
*/
_GLIBCXX_BEGIN_EXTERN_C
struct __atomic_flag_base
{
bool _M_i; // raw flag state; a plain bool keeps the layout C-compatible
};
_GLIBCXX_END_EXTERN_C
#define ATOMIC_FLAG_INIT { false }
// Base types for atomics.
//
// Three nested namespaces for atomic implementation details.
//
// The nested namespace inlined into std:: is determined by the value
// of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
// ATOMIC_*_LOCK_FREE macros.
//
// 0 == __atomic0 == Never lock-free
// 1 == __atomic1 == Best available, sometimes lock-free
// 2 == __atomic2 == Always lock-free
namespace __atomic0
{
// Locked (never lock-free) fallback implementations.
struct atomic_flag;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic2
{
// Builtin-based, always lock-free implementations.
struct atomic_flag;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic1
{
// Best-available mix: lock-free flag from __atomic2, locked integer
// base from __atomic0.
using __atomic2::atomic_flag;
using __atomic0::__atomic_base;
}
/// Lock-free Property
#if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
&& defined(_GLIBCXX_ATOMIC_BUILTINS_4) && defined(_GLIBCXX_ATOMIC_BUILTINS_8)
# define _GLIBCXX_ATOMIC_PROPERTY 2
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic2
#elif defined(_GLIBCXX_ATOMIC_BUILTINS_1)
# define _GLIBCXX_ATOMIC_PROPERTY 1
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic1
#else
# define _GLIBCXX_ATOMIC_PROPERTY 0
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic0
#endif
#define ATOMIC_CHAR_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR16_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR32_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_WCHAR_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_SHORT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
// Inline the implementation namespace selected above so that names like
// std::atomic_flag resolve to the chosen lock-free property level.
inline namespace _GLIBCXX_ATOMIC_NAMESPACE { }
#define ATOMIC_VAR_INIT(_VI) { _VI }
/// Primary atomic class template (defined in another header).
template<typename _Tp>
struct atomic;
/// Partial specialization for pointer types.
template<typename _Tp>
struct atomic<_Tp*>;
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
......@@ -135,7 +135,7 @@ bread(u32 dev, u64 sector, int writer)
if(!(b->flags & B_VALID))
iderw(b);
if (writer && !origwriter) {
__sync_fetch_and_and(&b->flags, ~B_BUSY);
b->flags &= ~B_BUSY;
cv_wakeup(&b->cv);
}
return b;
......@@ -159,7 +159,7 @@ brelse(struct buf *b, int writer)
if (writer) {
if((b->flags & B_BUSY) == 0)
panic("brelse");
__sync_fetch_and_and(&b->flags, ~B_BUSY);
b->flags &= ~B_BUSY;
cv_wakeup(&b->cv);
}
// rcu_begin_read() happens in bread
......
#include "gc.hh"
#include "atomic.hh"
struct buf : public rcu_freed {
int flags;
atomic<int> flags;
u32 dev;
u64 sector;
struct buf *prev; // LRU cache list
......
[Diff collapsed.]
#pragma once
class crange;
// A node of a crange skip list.  A range covers keys [key, key+size)
// and is linked on `nlevel` levels of the list; it is reclaimed via
// RCU (through rcu_freed) once unlinked.
class range : public rcu_freed {
public:
u64 key; // start of the covered interval
u64 size; // length of the interval; range covers [key, key+size)
void *value; // opaque payload owned by the caller
int curlevel; // the current levels it appears on
int nlevel; // the number of levels this range should appear
crange *cr; // the crange this range is part of
struct range** next; // one next pointer per level
struct spinlock *lock; // on separate cache line?
range(crange *cr, u64 k, u64 sz, void *v, struct range *n, int nlevel);
~range();
virtual void do_gc() {
delete this; // RCU callback: safe to reclaim once no reader can reference us
}
void print(int l);
void free_delayed(); // presumably defers reclamation to a grace period -- TODO confirm
void dec_ref(void); // drop one reference; NOTE(review): counting scheme not visible here
int lockif(range *e); // NOTE(review): looks like "lock only if still equal to e" -- confirm
} __mpalign__;
// Concurrent range skip list mapping disjoint intervals [k, k+sz) to
// opaque values; nlevel index levels above the base list.
class crange {
range *crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
// NOTE(review): the two declarations below carry no return type, which is
// ill-formed C++ (no implicit int) -- confirm the intended signatures.
crange_check(struct range *absent);
crange_replace(u64, u64, void*, range*, range*, range*);
public:
int nlevel; // number of levels in the crange skip list
crange(int nlevel);
~crange(void);
void del(u64 k, u64 sz); // remove the range matching [k, k+sz)
void add(u64 k, u64 sz, void *v); // insert [k, k+sz) -> v
range* search(u64 k, u64 sz, int mod); // find a range overlapping [k, k+sz)
int foreach(int (*cb)(range *r, void *), void *st); // visit ranges; a cb return of 0 aborts the walk
void print(int);
void check(struct range *absent);
int del_index(range *p0, range **e, int l);
void add_index(int l, range *e, range *p1, range *s1);
int lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr);
int find_and_lock(u64 k, u64 sz, range **p0, range **f0, range **l0, range **s0);
};
......@@ -28,7 +28,7 @@ filealloc(void)
struct file*
filedup(struct file *f)
{
if (__sync_fetch_and_add(&f->ref, 1) < 1)
if (f->ref++ < 1)
panic("filedup");
return f;
}
......@@ -37,7 +37,7 @@ filedup(struct file *f)
void
fileclose(struct file *f)
{
if (subfetch(&f->ref, 1) > 0)
if (--f->ref > 0)
return;
if(f->type == file::FD_PIPE)
......
#include "cpputil.hh"
#include "ns.hh"
#include "gc.hh"
#include "atomic.hh"
u64 namehash(const strbuf<DIRSIZ>&);
struct file {
enum { FD_NONE, FD_PIPE, FD_INODE, FD_SOCKET } type;
int ref; // reference count
atomic<int> ref; // reference count
char readable;
char writable;
......@@ -23,9 +24,9 @@ struct inode : public rcu_freed {
u32 dev; // Device number
u32 inum; // Inode number
u32 gen; // Generation number
int ref; // Reference count
atomic<int> ref; // Reference count
int flags; // I_BUSY, I_VALID
int readbusy;
atomic<int> readbusy;
struct condvar cv;
struct spinlock lock;
char lockname[16];
......
......@@ -249,10 +249,10 @@ iget(u32 dev, u32 inum)
ip = ins->lookup(mkpair(dev, inum));
if (ip) {
// tricky: first bump ref, then check free flag
__sync_fetch_and_add(&ip->ref, 1);
ip->ref++;
if (ip->flags & I_FREE) {
gc_end_epoch();
__sync_sub_and_fetch(&ip->ref, 1);
ip->ref--;
goto retry;
}
gc_end_epoch();
......@@ -339,7 +339,7 @@ iget(u32 dev, u32 inum)
struct inode*
idup(struct inode *ip)
{
__sync_fetch_and_add(&ip->ref, 1);
ip->ref++;
return ip;
}
......@@ -357,7 +357,7 @@ ilock(struct inode *ip, int writer)
while(ip->flags & (I_BUSYW | (writer ? I_BUSYR : 0)))
cv_sleep(&ip->cv, &ip->lock);
ip->flags |= I_BUSYR | (writer ? I_BUSYW : 0);
__sync_fetch_and_add(&ip->readbusy, 1);
ip->readbusy++;
release(&ip->lock);
if((ip->flags & I_VALID) == 0)
......@@ -372,7 +372,7 @@ iunlock(struct inode *ip)
panic("iunlock");
acquire(&ip->lock);
int lastreader = __sync_sub_and_fetch(&ip->readbusy, 1);
int lastreader = (--ip->readbusy);
ip->flags &= ~(I_BUSYW | ((lastreader==0) ? I_BUSYR : 0));
cv_wakeup(&ip->cv);
release(&ip->lock);
......@@ -382,7 +382,7 @@ iunlock(struct inode *ip)
void
iput(struct inode *ip)
{
if(__sync_sub_and_fetch(&ip->ref, 1) == 0) {
if(--ip->ref == 0) {
if (ip->nlink)
return;
acquire(&ip->lock);
......@@ -407,7 +407,7 @@ iput(struct inode *ip)
}
ip->flags |= (I_BUSYR | I_BUSYW);
__sync_fetch_and_add(&ip->readbusy, 1);
ip->readbusy++;
release(&ip->lock);
......
......@@ -11,6 +11,8 @@ extern "C" {
}
#include "ns.hh"
#include "atomic.hh"
extern u64 proc_hash(const u32&);
extern xns<u32, proc*, proc_hash> *xnspid;
......@@ -41,7 +43,7 @@ static struct gc_state {
struct condvar cv;
headinfo delayed[NEPOCH];
headinfo tofree[NEPOCH];
int ndelayed;
atomic<int> ndelayed;
int min_epoch;
int nrun;
int nfree;
......@@ -156,13 +158,13 @@ gc_delayfreelist(void)
void
gc_delayed(rcu_freed *e)
{
__sync_fetch_and_add(&gc_state[mycpu()->id].ndelayed, 1);
gc_state[mycpu()->id].ndelayed++;
pushcli();
int c = mycpu()->id;
u64 myepoch = myproc()->epoch;
u64 minepoch = gc_state[c].delayed[myepoch % NEPOCH].epoch;
if (gc_debug)
cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid, global_epoch, gc_state[c].ndelayed);
cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid, global_epoch, gc_state[c].ndelayed.load());
if (myepoch != minepoch) {
cprintf("%d: myepoch %lu minepoch %lu\n", myproc()->pid, myepoch, minepoch);
panic("gc_delayed_int");
......@@ -226,7 +228,7 @@ gc_worker(void *x)
for (i = gc_state[mycpu()->id].min_epoch; i < global-2; i++) {
int nfree = gc_free_tofreelist(&(gc_state[mycpu()->id].tofree[i%NEPOCH].head), i);
gc_state[mycpu()->id].tofree[i%NEPOCH].epoch += NEPOCH;
__sync_fetch_and_sub(&gc_state[mycpu()->id].ndelayed, nfree);
gc_state[mycpu()->id].ndelayed -= nfree;
if (0 && nfree > 0) {
cprintf("%d: epoch %lu freed %d\n", mycpu()->id, i, nfree);
}
......
......@@ -55,38 +55,6 @@ void consoleintr(int(*)(void));
#define assert(c) if (!(c)) { cprintf("%s:%d: ", __FILE__, __LINE__); panic("assertion failure"); }
// crange.c
#if __cplusplus
#include "gc.hh"
struct range : public rcu_freed {
u64 key;
u64 size;
void *value;
int curlevel; // the current levels it appears on
int nlevel; // the number of levels this range should appear
struct crange *cr; // the crange this range is part of
struct range** next; // one next pointer per level
struct spinlock *lock; // on separate cache line?
range() : rcu_freed("range") {}
void do_gc();
} __mpalign__;
struct crange {
int nlevel; // number of levels in the crange skip list
struct range crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
};
struct crange* crange_alloc(int nlevel);
void crange_free(struct crange *cr);
void crange_del(struct crange *cr, u64 k, u64 sz);
void crange_add(struct crange *cr, u64 k, u64 sz, void *v);
struct range* crange_search(struct crange *cr, u64 k, u64 sz, int mod);
int crange_foreach(struct crange *crk, int (*f)(struct range *r, void *st), void *st);
void crange_print(struct crange *cr, int);
#endif
// e1000.c
extern int e1000irq;
extern int e1000init;
......
......@@ -69,7 +69,7 @@ sys_kernlet(int fd, size_t count, off_t off)
if(f->type != file::FD_INODE)
return -1;
fetchadd(&f->ip->ref, 1);
f->ip->ref++;
w = pread_allocwork(f->ip, myproc()->vmap->kshared, count, off);
if (w == NULL) {
iput(f->ip);
......
......@@ -582,7 +582,7 @@ fork(int flags)
}
} else {
np->vmap = myproc()->vmap;
__sync_fetch_and_add(&np->vmap->ref, 1);
np->vmap->ref++;
}
np->brk = myproc()->brk;
......
......@@ -18,4 +18,5 @@ typedef pme_t pml4e_t;
typedef s64 ssize_t;
typedef u64 size_t;
typedef u64 off_t;
typedef s64 ptrdiff_t;
#endif
......@@ -14,6 +14,7 @@ extern "C" {
#include "vm.hh"
#include "gc.hh"
#include "crange.hh"
static void vmap_free(void *p);
......@@ -22,7 +23,7 @@ enum { vm_debug = 0 };
static void
vmn_decref(struct vmnode *n)
{
if(subfetch(&n->ref, 1) == 0)
if(--n->ref == 0)
vmn_free(n);
}
......@@ -84,7 +85,7 @@ vmn_allocpg(u64 npg)
void
vmap_decref(struct vmap *m)
{
if(subfetch(&m->ref, 1) == 0)
if(--m->ref == 0)
vmap_free(m);
}
......@@ -114,7 +115,7 @@ vmap_alloc(void)
return 0;
}
#ifdef TREE
m->cr = crange_alloc(10);
m->cr = new crange(10);
if (m->cr == 0)
return 0;
#endif
......@@ -216,7 +217,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
if (vm_debug)
cprintf("pagefault: err 0x%x va 0x%lx type %d ref %lu pid %d\n",
err, va, m->va_type, m->n->ref, myproc()->pid);
err, va, m->va_type, m->n->ref.load(), myproc()->pid);
if (m->va_type == COW && (err & FEC_WR)) {
if (pagefault_wcow(vmap, va, pte, m, npg) < 0) {
......@@ -313,10 +314,10 @@ struct state {
};
static int
vmap_free_vma(struct range *r, void *st)
vmap_free_vma(range *r, void *st)
{
delete (vma *) r->value;
crange_del(r->cr, r->key, r->size);
r->cr->del(r->key, r->size);
return 1;
}
......@@ -324,8 +325,8 @@ static void
vmap_free(void *p)
{
struct vmap *m = (struct vmap *) p;
crange_foreach(m->cr, vmap_free_vma, NULL);
crange_free(m->cr);
m->cr->foreach(vmap_free_vma, NULL);
delete m->cr;
ksfree(slab_kshared, m->kshared);
freevm(m->pml4);
m->pml4 = 0;
......@@ -346,7 +347,7 @@ vmap_lookup(struct vmap *m, uptr start, uptr len)
if(start + len < start)
panic("vmap_lookup bad len");
struct range *r = crange_search(m->cr, start, len, 0);
range *r = m->cr->search(start, len, 0);
if (r != 0) {
struct vma *e = (struct vma *) (r->value);
if (e->va_end <= e->va_start)
......@@ -379,8 +380,8 @@ vmap_insert(struct vmap *m, struct vmnode *n, uptr va_start)
e->va_start = va_start;
e->va_end = va_start + len;
e->n = n;
__sync_fetch_and_add(&n->ref, 1);
crange_add(m->cr, e->va_start, len, (void *) e);
n->ref++;
m->cr->add(e->va_start, len, (void *) e);
release(&m->lock);
return 0;
}
......@@ -410,8 +411,8 @@ vmap_copy_vma(struct range *r, void *_st)
if(c->n == 0) {
return 0;
}
__sync_fetch_and_add(&c->n->ref, 1);
crange_add(st->cr, c->va_start, c->va_end - c->va_start, (void *) c);
c->n->ref++;
st->cr->add(c->va_start, c->va_end - c->va_start, (void *) c);
return 1;
}
......@@ -427,7 +428,7 @@ vmap_copy(struct vmap *m, int share)
st.share = share;
st.pml4 = m->pml4;
st.cr = c->cr;
if (!crange_foreach(m->cr, vmap_copy_vma, &st)) {
if (!m->cr->foreach(vmap_copy_vma, &st)) {
vmap_free(c);
release(&m->lock);
return 0;
......@@ -445,7 +446,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len)
{
acquire(&m->lock);
uptr va_end = va_start + len;
struct range *r = crange_search(m->cr, va_start, len, 0);
struct range *r = m->cr->search(va_start, len, 0);
if (r == 0)
panic("no vma?");
struct vma *e = (struct vma *) r->value;
......@@ -454,7 +455,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len)
release(&m->lock);
return -1;
}
crange_del(m->cr, va_start, len);
m->cr->del(va_start, len);
gc_delayed(e);
release(&m->lock);
return 0;
......
#define TREE
#include "gc.hh"
#include "atomic.hh"
// A mapping of a chunk of an address space to
// a specific memory object.
......@@ -27,7 +28,7 @@ struct vma : public rcu_freed {
struct vmnode {
u64 npages;
char *page[128];
u64 ref;
atomic<u64> ref;
enum vmntype type;
struct inode *ip;
u64 offset;
......@@ -44,7 +45,7 @@ struct vmap {
struct vma* e[16];
#endif
struct spinlock lock; // serialize map/lookup/unmap
u64 ref;
atomic<u64> ref;
u64 alloc;
pml4e_t *pml4; // Page table
char *kshared;
......
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to comment.