Commit ea984aa9 authored by Silas Boyd-Wickizer

Merge branch 'scale-amd64' of git+ssh://amsterdam.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

#pragma once
#define _GLIBCXX_VISIBILITY(x)
#define _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define _GLIBCXX_END_NAMESPACE_VERSION
#define _GLIBCXX_BEGIN_EXTERN_C extern "C" {
#define _GLIBCXX_END_EXTERN_C }
#define __glibcxx_assert(x)
#include "atomic_base.h"
#include "atomic_2.h"
template<class T>
struct atomic : public std::__atomic2::__atomic_base<T> {
atomic() = default;
~atomic() = default;
atomic(const atomic&) = delete;
atomic& operator=(const atomic&) = delete;
atomic& operator=(const atomic&) volatile = delete;
constexpr atomic(T v) : std::__atomic2::__atomic_base<T>(v) {}
using std::__atomic2::__atomic_base<T>::operator T;
using std::__atomic2::__atomic_base<T>::operator=;
};
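For illustration only (not code from this commit): kernel code that includes this header is expected to use the wrapper roughly as below. The names refcount, get and put are made up for the example; the operations map onto the __sync built-ins of the __atomic2 base class reproduced further down.

atomic<int> refcount(0);                 // lock-free reference counter

void get() { refcount++; }               // __sync_fetch_and_add under the hood
bool put() { return --refcount == 0; }   // true when the last reference is dropped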
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_2.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1
#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
namespace __atomic2
{
/// atomic_flag
struct atomic_flag : public __atomic_flag_base
{
atomic_flag() = default;
~atomic_flag() = default;
atomic_flag(const atomic_flag&) = delete;
atomic_flag& operator=(const atomic_flag&) = delete;
atomic_flag& operator=(const atomic_flag&) volatile = delete;
// Conversion to ATOMIC_FLAG_INIT.
atomic_flag(bool __i): __atomic_flag_base({ __i }) { }
bool
test_and_set(memory_order __m = memory_order_seq_cst)
{
// Redundant synchronize if built-in for lock is a full barrier.
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
return __sync_lock_test_and_set(&_M_i, 1);
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile
{
// Redundant synchronize if built-in for lock is a full barrier.
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
return __sync_lock_test_and_set(&_M_i, 1);
}
void
clear(memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_consume);
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_lock_release(&_M_i);
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
}
void
clear(memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_consume);
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_lock_release(&_M_i);
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
}
};
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
//
// atomic_bool bool
// atomic_char char
// atomic_schar signed char
// atomic_uchar unsigned char
// atomic_short short
// atomic_ushort unsigned short
// atomic_int int
// atomic_uint unsigned int
// atomic_long long
// atomic_ulong unsigned long
// atomic_llong long long
// atomic_ullong unsigned long long
// atomic_char16_t char16_t
// atomic_char32_t char32_t
// atomic_wchar_t wchar_t
//
// NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
// 8 bytes, since that is what GCC built-in functions for atomic
// memory access expect.
template<typename _ITp>
struct __atomic_base
{
private:
typedef _ITp __int_type;
__int_type _M_i;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __int_type convertible to _M_i.
constexpr __atomic_base(__int_type __i): _M_i (__i) { }
operator __int_type() const
{ return load(); }
operator __int_type() const volatile
{ return load(); }
__int_type
operator=(__int_type __i)
{
store(__i);
return __i;
}
__int_type
operator=(__int_type __i) volatile
{
store(__i);
return __i;
}
__int_type
operator++(int)
{ return fetch_add(1); }
__int_type
operator++(int) volatile
{ return fetch_add(1); }
__int_type
operator--(int)
{ return fetch_sub(1); }
__int_type
operator--(int) volatile
{ return fetch_sub(1); }
__int_type
operator++()
{ return __sync_add_and_fetch(&_M_i, 1); }
__int_type
operator++() volatile
{ return __sync_add_and_fetch(&_M_i, 1); }
__int_type
operator--()
{ return __sync_sub_and_fetch(&_M_i, 1); }
__int_type
operator--() volatile
{ return __sync_sub_and_fetch(&_M_i, 1); }
__int_type
operator+=(__int_type __i)
{ return __sync_add_and_fetch(&_M_i, __i); }
__int_type
operator+=(__int_type __i) volatile
{ return __sync_add_and_fetch(&_M_i, __i); }
__int_type
operator-=(__int_type __i)
{ return __sync_sub_and_fetch(&_M_i, __i); }
__int_type
operator-=(__int_type __i) volatile
{ return __sync_sub_and_fetch(&_M_i, __i); }
__int_type
operator&=(__int_type __i)
{ return __sync_and_and_fetch(&_M_i, __i); }
__int_type
operator&=(__int_type __i) volatile
{ return __sync_and_and_fetch(&_M_i, __i); }
__int_type
operator|=(__int_type __i)
{ return __sync_or_and_fetch(&_M_i, __i); }
__int_type
operator|=(__int_type __i) volatile
{ return __sync_or_and_fetch(&_M_i, __i); }
__int_type
operator^=(__int_type __i)
{ return __sync_xor_and_fetch(&_M_i, __i); }
__int_type
operator^=(__int_type __i) volatile
{ return __sync_xor_and_fetch(&_M_i, __i); }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__int_type __i, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __i;
else
{
// write_mem_barrier();
_M_i = __i;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __i;
else
{
// write_mem_barrier();
_M_i = __i;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
__int_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__int_type __ret = _M_i;
__sync_synchronize();
return __ret;
}
__int_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__int_type __ret = _M_i;
__sync_synchronize();
return __ret;
}
__int_type
exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __i);
}
__int_type
exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __i);
}
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2)
{ return compare_exchange_strong(__i1, __i2, __m1, __m2); }
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2) volatile
{ return compare_exchange_strong(__i1, __i2, __m1, __m2); }
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__i1, __i2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__i1, __i2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__int_type __i1o = __i1;
__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
// Assume extra stores (of same value) allowed in true case.
__i1 = __i1n;
return __i1o == __i1n;
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__int_type __i1o = __i1;
__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
// Assume extra stores (of same value) allowed in true case.
__i1 = __i1n;
return __i1o == __i1n;
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__i1, __i2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__i1, __i2, __m,
__calculate_memory_order(__m));
}
__int_type
fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_i, __i); }
__int_type
fetch_add(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_i, __i); }
__int_type
fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_i, __i); }
__int_type
fetch_sub(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_i, __i); }
__int_type
fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_and(&_M_i, __i); }
__int_type
fetch_and(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_and(&_M_i, __i); }
__int_type
fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_or(&_M_i, __i); }
__int_type
fetch_or(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_or(&_M_i, __i); }
__int_type
fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_xor(&_M_i, __i); }
__int_type
fetch_xor(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_xor(&_M_i, __i); }
};
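// Illustrative sketch (not part of this header): a typical CAS loop over the
// operations above. Note that compare_exchange_strong() writes the value read
// from memory back into its first argument when the exchange fails, so the
// loop does not need a separate reload. 'flags' stands for any
// __atomic_base<int> and SOME_BIT is a placeholder constant.
//
//   int expected = flags.load();
//   while (!flags.compare_exchange_strong(expected, expected | SOME_BIT))
//     ;   // 'expected' now holds the current value; retry with it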
/// Partial specialization for pointer types.
template<typename _PTp>
struct __atomic_base<_PTp*>
{
private:
typedef _PTp* __pointer_type;
__pointer_type _M_p;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __pointer_type convertible to _M_p.
constexpr __atomic_base(__pointer_type __p): _M_p (__p) { }
operator __pointer_type() const
{ return load(); }
operator __pointer_type() const volatile
{ return load(); }
__pointer_type
operator=(__pointer_type __p)
{
store(__p);
return __p;
}
__pointer_type
operator=(__pointer_type __p) volatile
{
store(__p);
return __p;
}
__pointer_type
operator++(int)
{ return fetch_add(1); }
__pointer_type
operator++(int) volatile
{ return fetch_add(1); }
__pointer_type
operator--(int)
{ return fetch_sub(1); }
__pointer_type
operator--(int) volatile
{ return fetch_sub(1); }
__pointer_type
operator++()
{ return fetch_add(1) + 1; }
__pointer_type
operator++() volatile
{ return fetch_add(1) + 1; }
__pointer_type
operator--()
{ return fetch_sub(1) -1; }
__pointer_type
operator--() volatile
{ return fetch_sub(1) -1; }
__pointer_type
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
__pointer_type
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
__pointer_type
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
__pointer_type
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
__pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
__pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_p, __d); }
};
} // namespace __atomic2
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1
#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
/**
* @defgroup atomics Atomics
*
* Components for performing atomic operations.
* @{
*/
/// Enumeration for memory_order
typedef enum memory_order
{
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
inline memory_order
__calculate_memory_order(memory_order __m)
{
const bool __cond1 = __m == memory_order_release;
const bool __cond2 = __m == memory_order_acq_rel;
memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
return __mo2;
}
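// Used by the single-memory_order compare_exchange overloads in atomic_2.h to
// derive the failure ordering from the success ordering: memory_order_release
// maps to memory_order_relaxed and memory_order_acq_rel to
// memory_order_acquire; every other order is passed through unchanged.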
void
atomic_thread_fence(memory_order);
void
atomic_signal_fence(memory_order);
/// kill_dependency
template<typename _Tp>
inline _Tp
kill_dependency(_Tp __y)
{
_Tp __ret(__y);
return __ret;
}
/**
* @brief Base type for atomic_flag.
*
* Base type is POD with data, allowing atomic_flag to derive from
* it and meet the standard layout type requirement. In addition to
* compatibilty with a C interface, this allows different
* implementations of atomic_flag to use the same atomic operation
* functions, via a standard conversion to the __atomic_flag_base
* argument.
*/
_GLIBCXX_BEGIN_EXTERN_C
struct __atomic_flag_base
{
bool _M_i;
};
_GLIBCXX_END_EXTERN_C
#define ATOMIC_FLAG_INIT { false }
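// Illustrative sketch (not part of this header): with the atomic_flag
// operations defined in atomic_2.h, a flag can serve as a minimal
// test-and-set spinlock, e.g.:
//
//   atomic_flag busy = ATOMIC_FLAG_INIT;
//   void lock()   { while (busy.test_and_set(memory_order_acquire)) ; /* spin */ }
//   void unlock() { busy.clear(memory_order_release); }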
// Base types for atomics.
//
// Three nested namespaces for atomic implementation details.
//
// The nested namespace inlined into std:: is determined by the value
// of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
// ATOMIC_*_LOCK_FREE macros.
//
// 0 == __atomic0 == Never lock-free
// 1 == __atomic1 == Best available, sometimes lock-free
// 2 == __atomic2 == Always lock-free
namespace __atomic0
{
struct atomic_flag;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic2
{
struct atomic_flag;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic1
{
using __atomic2::atomic_flag;
using __atomic0::__atomic_base;
}
/// Lock-free Property
#if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
&& defined(_GLIBCXX_ATOMIC_BUILTINS_4) && defined(_GLIBCXX_ATOMIC_BUILTINS_8)
# define _GLIBCXX_ATOMIC_PROPERTY 2
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic2
#elif defined(_GLIBCXX_ATOMIC_BUILTINS_1)
# define _GLIBCXX_ATOMIC_PROPERTY 1
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic1
#else
# define _GLIBCXX_ATOMIC_PROPERTY 0
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic0
#endif
#define ATOMIC_CHAR_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR16_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR32_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_WCHAR_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_SHORT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
inline namespace _GLIBCXX_ATOMIC_NAMESPACE { }
#define ATOMIC_VAR_INIT(_VI) { _VI }
template<typename _Tp>
struct atomic;
template<typename _Tp>
struct atomic<_Tp*>;
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
@@ -135,7 +135,7 @@ bread(u32 dev, u64 sector, int writer)
 if(!(b->flags & B_VALID))
 iderw(b);
 if (writer && !origwriter) {
-__sync_fetch_and_and(&b->flags, ~B_BUSY);
+b->flags &= ~B_BUSY;
 cv_wakeup(&b->cv);
 }
 return b;
@@ -159,7 +159,7 @@ brelse(struct buf *b, int writer)
 if (writer) {
 if((b->flags & B_BUSY) == 0)
 panic("brelse");
-__sync_fetch_and_and(&b->flags, ~B_BUSY);
+b->flags &= ~B_BUSY;
 cv_wakeup(&b->cv);
 }
 // rcu_begin_read() happens in bread
...
 #include "gc.hh"
+#include "atomic.hh"
 struct buf : public rcu_freed {
-int flags;
+atomic<int> flags;
 u32 dev;
 u64 sector;
 struct buf *prev; // LRU cache list
...
@@ -8,8 +8,8 @@ extern "C" {
 #include "proc.h"
 #include "cpu.h"
 }
 #include "gc.hh"
+#include "crange.hh"
 #include "cpputil.hh"
 //
@@ -57,8 +57,6 @@ extern "C" {
 enum { crange_debug = 0 };
 enum { crange_checking = 0 };
-void crange_check(struct crange *cr, struct range *absent);
 //
 // Methods for ranges
 //
@@ -84,85 +82,90 @@ range_draw_nlevel(int nlevel)
 return l+1;
 }
-void
-range_print(struct range *e, int l)
+void range::print(int l)
 {
-cprintf ("0x%lx-0x%lx(%lu) 0x%lx, c %d, t %d, n 0x%lx m 0x%lx\n", e->key, e->key+e->size, e->size, (long) e->value, e->curlevel, e->nlevel, (long) e->next, MARKED(e->next[l]));
+cprintf ("0x%lx-0x%lx(%lu) 0x%lx, c %d, t %d, n 0x%lx m 0x%lx\n", this->key, this->key+this->size, this->size, (long) this->value, this->curlevel, this->nlevel, (long) this->next, MARKED(this->next[l]));
 }
-static void
-range_free(void *p)
+range::~range()
 {
-struct range *e = (struct range *) p;
 if (crange_debug)
-cprintf("%d: range_free: 0x%lx 0x%lx-0x%lx(%ld)\n", myproc()->cpuid, (u64) e, e->key, e->key+e->size, e->size);
-crange_check(e->cr, e);
-assert(e->curlevel == -1);
-for (int l = 0; l < e->nlevel; l++) {
-e->next[l] = (struct range *) 0xDEADBEEF;
+cprintf("%d: range_free: 0x%lx 0x%lx-0x%lx(%ld)\n", myproc()->cpuid, (u64) this, this->key, this->key+this->size, this->size);
+//crange_check(e->cr, e);
+// assert(this->curlevel == -1);
+for (int l = 0; l < this->nlevel; l++) {
+this->next[l] = (struct range *) 0xDEADBEEF;
 }
-kmalignfree(e->lock);
-kmfree(e->next);
-kmalignfree(e);
+kmalignfree(this->lock);
+kmfree(this->next);
+// delete this;
 }
-void
-range::do_gc()
-{
-range_free(this);
-}
+#if 0
+class range_delayed : public rcu_freed {
+private:
+crange::range *_e;
+public:
+range_delayed(crange::range *e) : rcu_freed("range_delayed"), _e(e) {}
+virtual void do_gc() {
+range_free(_e);
+delete this;
+}
+};
+#endif
-static void
-range_free_delayed(struct range *e)
+void range::free_delayed(void)
 {
 if (crange_debug)
-cprintf("%d: range_free_delayed: 0x%lx 0x%lx-0x%lx(%lu) %lu\n", myproc()->pid, (long) e, e->key, e->key + (e)->size, e->size, myproc()->epoch);
-crange_check(e->cr, e);
-assert(e->curlevel == -1);
-gc_delayed(e);
+cprintf("%d: free_delayed: 0x%lx 0x%lx-0x%lx(%lu) %lu\n", myproc()->pid, (long) this, this->key, this->key + this->size, this->size, myproc()->epoch);
+// crange_check(this->cr, e);
+assert(this->curlevel == -1);
+//range_delayed *rd = new range_delayed(e);
+gc_delayed(this);
 }
-static void
-range_dec_ref(struct range *e)
+void range::dec_ref(void)
 {
-int n = __sync_fetch_and_sub(&(e->curlevel), 1);
+int n = __sync_fetch_and_sub(&(this->curlevel), 1);
 if (n == 0) { // now removed from all levels.
-range_free_delayed(e);
+this->free_delayed();
 }
 }
-static struct range *
-range_alloc(struct crange *cr, u64 k, u64 sz, void *v, struct range *n)
+range::range(crange *cr, u64 k, u64 sz, void *v, struct range *n, int nlevel = 0)
+: rcu_freed("range_delayed")
 {
-void *rangemem;
-kmalign(&rangemem, CACHELINE, sizeof(struct range));
-assert(rangemem);
-struct range *r = new(rangemem) range();
-r->key = k;
-r->size = sz;
-r->value = v;
-assert(r->size > 0);
+//struct range *r;
+//kmalign((void **) &r, CACHELINE, sizeof(struct range));
+//assert(r);
+if (crange_debug)
+cprintf("range:range:: %lu %lu %d\n", k, sz, nlevel);
+this->key = k;
+this->size = sz;
+this->value = v;
+this->cr = cr;
 assert(cr->nlevel > 0);
-r->curlevel = 0;
-r->nlevel = range_draw_nlevel(cr->nlevel);
-r->next = (struct range**) kmalloc(sizeof(sizeof(r->next[0])) * r->nlevel); // cache align?
-assert(r->next);
-r->next[0] = n;
-for (int l = 1; l < r->nlevel; l++) r->next[l] = 0;
-assert(kmalign((void **) &r->lock, CACHELINE,
+this->curlevel = 0;
+if (nlevel == 0) this->nlevel = range_draw_nlevel(cr->nlevel);
+else this->nlevel = nlevel;
+this->next = (struct range**) kmalloc(sizeof(sizeof(this->next[0])) * this->nlevel); // cache align?
+assert(this->next);
+this->next[0] = n;
+for (int l = 1; l < this->nlevel; l++) this->next[l] = 0;
+assert(kmalign((void **) &this->lock, CACHELINE,
 sizeof(struct spinlock)) == 0);
-initlock(r->lock, "crange", LOCKSTAT_CRANGE);
-r->cr = cr;
-return r;
+initlock(this->lock, "crange", LOCKSTAT_CRANGE);
+//r->cr = cr;
+//return r;
 }
 //
 // Methods on a sequence (i.e., ordered list) of ranges
 //
-static struct range *
-range_insert(struct range *head, struct range *r)
+static range *insert(struct range *head, struct range *r)
 {
 struct range *n, *p;
 p = nullptr;
@@ -182,17 +185,16 @@ range_insert(struct range *head, struct range *r)
 return head;
 }
-// lock p if p->next == e and p isn't marked for deletion. if not, return failure.
-static int
-range_lock_pred(struct range *p, struct range *e)
+// lock p if this->next == e and p isn't marked for deletion. if not, return failure.
+int range::lockif(range *e)
 {
 assert(!MARKED(e));
-acquire(p->lock);
-if (p->next[0] == e) {
+acquire(this->lock);
+if (this->next[0] == e) {
 return 1;
 }
-release(p->lock);
-// cprintf("%d: range_lock_pred: retry %u\n", mycpu()->id, p->key);
+release(this->lock);
+// cprintf("%d: range_lock_pred: retry %u\n", mycpu()->id, this->key);
 return 0;
 }
@@ -202,8 +204,7 @@ range_lock_pred(struct range *p, struct range *e)
 // causing curlevel to drop below nlevel, and causing add_index to add the
 // node back on level on which it already has been inserted (because it hasn't
 // been marked deleted yet at that level).
-static void
-range_mark(struct range *f, struct range *s)
+static void mark(range *f, range *s)
 {
 struct range *e;
 for (e = f; e && e != s; e = WOMARK(e->next[0])) {
@@ -215,8 +216,7 @@ range_mark(struct range *f, struct range *s)
 }
 // Unlock ranges f through l
-static void
-range_unlockn(struct range *f, struct range *l)
+static void unlockn(range *f, range *l)
 {
 struct range *e;
 for (e = f; e != l; e = WOMARK(e->next[0])) {
@@ -227,64 +227,61 @@ range_unlockn(struct range *f, struct range *l)
 }
 // Delay free ranges f through l
-static void
-range_freen(struct range *f, struct range *l)
+static void freen(struct range *f, struct range *l)
 {
 struct range *e;
 for (e = f; e != l; e = WOMARK(e->next[0])) {
 assert(e);
 assert(e->curlevel >= 0);
-range_dec_ref(e);
+e->dec_ref();
 }
 if (l) {
 assert(e->curlevel >= 0);
-range_dec_ref(e);
+e->dec_ref();
 }
 }
 // Compute the sequence that will replace the to-be deleted sequence. Make copies to create
 // the new ranges, because readers may running through the list and looking at the old ranges.
 // If the whole sequence is replaced, it will return s.
-static struct range *
-crange_replace(u64 k, u64 sz, void *v, struct range *f, struct range *l,
-struct range *s)
+static range *replace(u64 k, u64 sz, void *v, range *f, range *l, range *s)
 {
-struct range *r;
+range *r;
 if (f == l) { // the first range covers the range to be deleted
 if (k <= f->key && f->key + f->size <= k + sz) { // range sequence covers the first range
 r = s;
 } else {
 if (f->key < k && k+sz < f->key + f->size) { // split range?
-struct range *right = range_alloc(f->cr, k+sz, f->key+f->size-k-sz, v, s);
-struct range *left = range_alloc(f->cr, f->key, k-f->key, v, right);
+range *right = new range(f->cr, k+sz, f->key+f->size-k-sz, v, s);
+range *left = new range(f->cr, f->key, k-f->key, v, right);
 r = left;
 } else if (k <= f->key) { // cut front?
 assert(k+sz <= f->key + f->size);
-r = range_alloc(f->cr, k+sz, f->key + f->size - k - sz, v, f->next[0]);
+r = new range(f->cr, k+sz, f->key + f->size - k - sz, v, f->next[0]);
 } else { // cut end
 assert(k > f->key);
-r = range_alloc(f->cr, f->key, k - f->key, v, f->next[0]);
+r = new range(f->cr, f->key, k - f->key, v, f->next[0]);
 }
 }
 } else if (k <= f->key && k + sz >= l->key + l->size) { // delete complete range?
 r = s;
 } else { // first range covers part and last range other part?
-struct range *left;
-struct range *right;
+range *left;
+range *right;
 // cprintf("f 0x%lx [%d, %d) l 0x%lx [%d, %d)\n", (long) f, f->key, f->key+f->size, (long) l, l->key, l->key+l->size);
 if (k <= f->key && k + sz >= f->key + f->size) { // delete first range?
 left = nullptr;
 } else {
 assert(k > f->key);
-left = range_alloc(f->cr, f->key, k - f->key, v, 0);
+left = new range(f->cr, f->key, k - f->key, v, 0);
 }
 if (k + sz >= l->key + l->size) { // delete last range?
 right = nullptr;
 } else {
 assert(k+sz > l->key);
 assert(l->key + l->size >= k + sz);
-right = range_alloc(f->cr, k+sz, l->key+l->size - k - sz, v, s);
+right = new range(f->cr, k+sz, l->key+l->size - k - sz, v, s);
 }
 r = left ? left : right;
 if (left) left->next[0] = right ? right : s;
@@ -297,65 +294,61 @@ crange_replace(u64 k, u64 sz, void *v, struct range *f, struct range *l,
 //
 void
-crange_print(struct crange *cr, int full)
+crange::print(int full)
 {
 struct range *e;
-for (int l = 0; l < cr->nlevel; l++) {
+for (int l = 0; l < this->nlevel; l++) {
 int c = 0;
 cprintf("crange %d: ", l);
-for (e = cr->crange_head.next[l]; e; e = WOMARK(e->next[l])) {
+for (e = this->crange_head->next[l]; e; e = WOMARK(e->next[l])) {
 c++;
-if (full) range_print(e, l);
+if (full) e->print(l);
 }
 cprintf(" cnt %d \n", c);
 }
 }
-struct crange*
-crange_alloc(int nlevel)
+crange::crange(int nlevel)
 {
-struct crange *cr;
-assert(kmalign((void **) &cr, CACHELINE, sizeof(struct crange)) == 0);
+// assert(kmalign((void **) &cr, CACHELINE, sizeof(struct crange)) == 0);
 assert(nlevel >= 0);
-cr->nlevel = nlevel;
-cr->crange_head.cr = cr;
-cr->crange_head.key = 0;
-cr->crange_head.size = 0;
-assert(kmalign((void **) &cr->crange_head.lock,
+this->nlevel = nlevel;
+this->crange_head = new range(this, 0, 0, nullptr, nullptr, nlevel);
+#if 0
+assert(kmalign((void **) &cr->crange_head->lock,
 CACHELINE, sizeof(struct spinlock)) == 0);
-initlock(cr->crange_head.lock, "head lock", LOCKSTAT_CRANGE);
+initlock(cr->crange_head->lock, "head lock", LOCKSTAT_CRANGE);
 cr->crange_head.next = (struct range **) kmalloc(sizeof(cr->crange_head.next[0]) * nlevel);
 for (int l = 0; l < nlevel; l++) cr->crange_head.next[l] = 0;
-if (crange_debug) cprintf("crange_alloc: return 0x%lx\n", (u64) cr);
-return cr;
+#endif
+if (crange_debug) cprintf("crange::crange return 0x%lx\n", (u64) this);
+// this->print(1);
 }
-void
-crange_free(struct crange *cr)
+crange::~crange()
 {
-assert(cr);
-if (crange_debug) cprintf("crange_free: 0x%lx\n", (u64) cr);
-struct range *e, *n;
-for (e = WOMARK(cr->crange_head.next[0]); e; e = n) {
+if (crange_debug) cprintf("crange_free: 0x%lx\n", (u64) this);
+range *e, *n;
+for (e = WOMARK(this->crange_head->next[0]); e; e = n) {
 n = WOMARK(e->next[0]);
-range_free(e);
+delete e;
 }
-kmfree(cr->crange_head.next);
-kmalignfree(cr->crange_head.lock);
-kmalignfree(cr);
+delete this->crange_head;
+// kmfree(this->crange_head->next);
+// kmalignfree(this->crange_head.lock);
+// kmalignfree(cr);
 }
 // Check some invariants, ignoring marked nodes.
-void
-crange_check(struct crange *cr, struct range *absent)
+void crange::check(struct range *absent)
 {
 if (!crange_checking)
 return;
 int t = mycpu()->id;
 struct range *e, *s;
-for (int l = 0; l < cr->nlevel; l++) {
-for (e = cr->crange_head.next[l]; e; e = s) {
-assert(e->curlevel < cr->nlevel);
+for (int l = 0; l < this->nlevel; l++) {
+for (e = this->crange_head->next[l]; e; e = s) {
+assert(e->curlevel < this->nlevel);
 if (absent == e) {
 cprintf("%d: check level failed; 0x%lx is present\n", l, (u64) absent);
 assert(0);
@@ -363,14 +356,14 @@ crange_check(struct crange *cr, struct range *absent)
 // look for e level down, but only for non-marked nodes.
 if (l > 0 && e->next[l] != 0 && !MARKED(e->next[l])) {
 struct range *n;
-for (n = WOMARK(cr->crange_head.next[l-1]); n && n != e; n = WOMARK(n->next[l-1]))
+for (n = WOMARK(this->crange_head->next[l-1]); n && n != e; n = WOMARK(n->next[l-1]))
 ;
 __sync_synchronize();
 // if e is marked now, skip the check (the memory barrier ensures that we reread it
 // from memory (and not from a register)
 if (!MARKED(e->next[l]) && n != e) {
 cprintf("%d: check level %d failed 0x%lx-0x%lx(%lu) m %lu c %d t %d; in high level but not low\n", t, l, e->key, e->key+e->size, e->size, MARKED(e->next[l]), e->curlevel, e->nlevel);
-crange_print(cr, 1);
+this->print(1);
 assert(0);
 }
 }
@@ -379,7 +372,7 @@ crange_check(struct crange *cr, struct range *absent)
 assert(s != e);
 if (!MARKED(e->next[l]) && s && (e->key + e->size > s->key)) {
 if (crange_debug) cprintf("%d: e(%lu,%lu) overlaps with s(%lu,%lu)\n", t, e->key, e->size, s->key, e->size);
-crange_print(cr, 1);
+this->print(1);
 assert(0);
 }
 }
@@ -389,8 +382,7 @@ crange_check(struct crange *cr, struct range *absent)
 // Remove e from index, if marked for deletion. Returns 1 if e isn't marked.
 // Returns 0, if marked but on level 0. Returns -1 if remove fails.
 // Returns 1 on success. Tricky because of races between add and del.
-static int
-crange_del_index(struct crange *cr, struct range *p0, struct range **e, int l)
+int crange::del_index(range *p0, range **e, int l)
 {
 int r = 1;
 assert(l < (*e)->nlevel);
@@ -411,7 +403,7 @@ crange_del_index(struct crange *cr, struct range *p0, struct range **e, int l)
 int cas = __sync_bool_compare_and_swap(&(p0->next[l]), *e, WOMARK((*e)->next[l]));
 if (cas) {
 assert((*e)->curlevel >= 0);
-range_dec_ref(*e);
+(*e)->dec_ref();
 *e = WOMARK((*e)->next[l]);
 } else {
 // cprintf("%d: crange_del_index: retry del %u(%u)\n", mycpu()->id, (*e)->key, (*e)->key + (*e)->size);
@@ -421,14 +413,13 @@ crange_del_index(struct crange *cr, struct range *p0, struct range **e, int l)
 }
 }
 done:
-crange_check(cr, nullptr);
+this->check(nullptr);
 return r;
 }
 // Insert e into index one level up, between p and s, if e hasn't been inserted
 // yet on that level.
-static void
-crange_add_index(struct crange *cr, int l, struct range *e, struct range *p1, struct range *s1)
+void crange::add_index(int l, range *e, range *p1, range *s1)
 {
 if (l >= e->nlevel-1) return;
 if (MARKED(e->next[l+1])) return;
@@ -456,14 +447,12 @@ crange_add_index(struct crange *cr, int l, struct range *e, struct range *p1, st
 }
 }
 done:
-crange_check(cr, nullptr);
+this->check(nullptr);
 }
 // Given the range that starts the sequence, find all other ranges part of sequence and lock them,
 // if l == 0
-static int
-crange_lock_range(u64 k, u64 sz, int l, struct range **er, struct range **pr,
-struct range **fr, struct range **lr, struct range **sr)
+int crange::lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr)
 {
 struct range *e = *er;
 assert(*pr != e);
@@ -471,7 +460,7 @@ crange_lock_range(u64 k, u64 sz, int l, struct range **er, struct range **pr,
 *lr = e;
 if (l == 0) {
 // lock p, if still pointing to e (at the bottom level)
-if (!range_lock_pred(*pr, e))
+if (!(*pr)->lockif(e))
 return 0;
 // locked p and e; we are in business
 }
@@ -492,9 +481,7 @@ crange_lock_range(u64 k, u64 sz, int l, struct range **er, struct range **pr,
 // finds and locks all ranges in sequence [k, sz). Also, returns predecessors
 // and successors. Locks pred and ranges in bottom list. If range_lock_pred()
 // fails, search again.
-static int
-crange_find_and_lock(struct crange *cr, u64 k, u64 sz, struct range **p0,
-struct range **f0, struct range **l0, struct range **s0)
+int crange::find_and_lock(u64 k, u64 sz, range **p0, range **f0, range **l0, range **s0)
 {
 struct range *p1, *s1;
 struct range *e;
@@ -502,24 +489,24 @@ crange_find_and_lock(struct crange *cr, u64 k, u64 sz, struct range **p0,
 retry:
 *p0 = nullptr;
 *s0 = nullptr;
-for (int l = cr->nlevel-1; l >= 0; l--) {
+for (int l = this->nlevel-1; l >= 0; l--) {
 *f0 = nullptr;
 *l0 = nullptr;
 p1 = *p0; // remember last previous (p0) as the previous one level up (p1)
-*p0 = (l == cr->nlevel-1) ? &cr->crange_head : p1; // set current previous
+*p0 = (l == this->nlevel-1) ? this->crange_head : p1; // set current previous
 s1 = *s0;
 for (e = WOMARK((*p0)->next[l]); e; *p0 = e, e = WOMARK(e->next[l])) {
 assert(l < e->nlevel);
-int r = crange_del_index(cr, *p0, &e, l);
+int r = this->del_index(*p0, &e, l);
 if (r == -1) goto retry; // deletion failed because some other core did it; try again
 if (r == 0) continue; // range was marked but we are level 0, skip it; lock holder will remove
 if (e == 0) break; // all ranges on this level were removed
 if (k >= e->key+e->size) { // is e before k?
-crange_add_index(cr, l, e, p1, s1); // maybe add to index
+this->add_index(l, e, p1, s1); // maybe add to index
 continue;
 }
 if (range_intersect(k, sz, e->key, e->size)) { // first range of sequence?
-if (!crange_lock_range(k, sz, l, &e, p0, f0, l0, s0)) {
+if (!this->lock_range(k, sz, l, &e, p0, f0, l0, s0)) {
 // INCRETRY;
 goto retry;
 }
@@ -531,7 +518,7 @@ crange_find_and_lock(struct crange *cr, u64 k, u64 sz, struct range **p0,
 }
 }
 if (*f0 == nullptr) { // range isn't present, lock predecessor of key
-if (!range_lock_pred(*p0, *s0)) {
+if (!(*p0)->lockif(*s0)) {
 //INCRETRY;
 goto retry;
 }
@@ -547,17 +534,16 @@ crange_find_and_lock(struct crange *cr, u64 k, u64 sz, struct range **p0,
 // Search through the crange skip list for a range that intersects with [k, sz)
 // return that range. Pretend that marked ranges don't exist.
-struct range*
-crange_search(struct crange *cr, u64 k, u64 sz, int mod)
+range* crange::search(u64 k, u64 sz, int mod)
 {
 struct range *p, *e, *r;
-int n = (mod) ? range_draw_nlevel(cr->nlevel) : 0;
+int n = (mod) ? range_draw_nlevel(this->nlevel) : 0;
 gc_begin_epoch();
 //read_counters(myproc()->cpuid, 0);
-if (crange_debug) cprintf("crange_search: 0x%lx 0x%lx\n", (u64) cr, k);
+if (crange_debug) cprintf("crange_search: 0x%lx 0x%lx\n", (u64) this, k);
 r = nullptr;
-p = &cr->crange_head;
-for (int l = cr->nlevel-1; l >= 0; l--) {
+p = this->crange_head;
+for (int l = this->nlevel-1; l >= 0; l--) {
 for (e = WOMARK(p->next[l]); e; p = e, e = WOMARK(e->next[l])) {
 if (crange_debug)
 cprintf("level %d: 0x%lx 0x%lx-%lx(%lu) 0x%lx-0x%lx(%lu)\n", l, (u64) p, p->key, p->key+p->size, p->size, e->key, e->key+e->size, e->size);
@@ -583,13 +569,13 @@ crange_search(struct crange *cr, u64 k, u64 sz, int mod)
 end:
 //read_counters(myproc()->cpuid, 1);
 gc_end_epoch();
-// cprintf("crange_search: 0x%x return (0x%lx,0x%lx)\n", cr, r? r->key : 0, r? r->size : 0);
+if (crange_debug)
+cprintf("crange_search: 0x%lx return (0x%lx,0x%lx)\n", (u64) this, r? r->key : 0, r? r->size : 0);
 return r;
 }
 // delete the range [k, k+sz). compute the replacement list and then hook it in atomically.
-void
-crange_del(struct crange *cr, u64 k, u64 sz)
+void crange::del(u64 k, u64 sz)
 {
 struct range *prev;
 struct range *succ;
@@ -597,31 +583,31 @@ crange_del(struct crange *cr, u64 k, u64 sz)
 struct range *last;
 struct range *repl = nullptr;
-assert(cr);
+assert(this);
 gc_begin_epoch();
 if (crange_debug)
-cprintf("crange_del: 0x%lx 0x%lx-0x%lx(%ld)\n", (u64) cr, k, k+sz, sz);
-if (!crange_find_and_lock(cr, k, sz, &prev, &first, &last, &succ)) { // done?
+cprintf("crange_del: 0x%lx 0x%lx-0x%lx(%ld)\n", (u64) this, k, k+sz, sz);
+if (!this->find_and_lock(k, sz, &prev, &first, &last, &succ)) { // done?
 if (crange_debug) cprintf("crange_del: [0x%lx,0x%lx) not present\n", k, sz);
 release(prev->lock);
 goto done;
 }
-repl = crange_replace(k, sz, nullptr, first, last, succ);
-range_mark(first, succ);
+repl = replace(k, sz, nullptr, first, last, succ);
+mark(first, succ);
 while (1) {
 // hook new list into bottom list; if del resulted in a new list, use that (repl), otherwise
 // set predecessor to successor.
 if (__sync_bool_compare_and_swap(&(prev->next[0]), first, repl ? repl : succ)) {
 release(prev->lock);
-range_freen(first, last); // put on delayed list before unlocking
-range_unlockn(first, last);
+freen(first, last); // put on delayed list before unlocking
+unlockn(first, last);
 break;
 }
 cprintf("crange_del(%lu, %lu): prev was updated; try again\n", k, sz);
 assert(0);
 }
 done:
-crange_check(cr, nullptr);
+this->check(nullptr);
 // cprintf("%d: crange_del(0x%lx, 0x%lx):\n", mycpu()->id, k, sz); crange_print(cr, 1);
 gc_end_epoch();
 }
@@ -629,8 +615,7 @@ crange_del(struct crange *cr, u64 k, u64 sz)
 // add the range [k, sz), which causes ranges to be deleted, if the range overlaps an
 // existing range. we compute the replacement list and then hook it atomically.
-void
-crange_add(struct crange *cr, u64 k, u64 sz, void *v)
+void crange::add(u64 k, u64 sz, void *v)
 {
 struct range *r;
 struct range *first;
@@ -639,38 +624,36 @@ crange_add(struct crange *cr, u64 k, u64 sz, void *v)
 struct range *succ;
 struct range *repl = nullptr;
-if (crange_debug) cprintf("crange_add: 0x%lx 0x%lx-0x%lx(%lu)\n", (u64) cr, k, k+sz, sz);
-assert(cr);
+if (crange_debug) cprintf("crange_add: 0x%lx 0x%lx-0x%lx(%lu)\n", (u64) this, k, k+sz, sz);
+assert(this);
 gc_begin_epoch();
-if (crange_find_and_lock(cr, k, sz, &prev, &first, &last, &succ)) {
+if (this->find_and_lock(k, sz, &prev, &first, &last, &succ)) {
 if (crange_debug) cprintf("crange_add(0x%lx,0x%lx) overlaps with [0x%lx,0x%lx)\n", k, sz, first->key, first->size);
-repl = crange_replace(k, sz, v, first, last, succ);
+repl = replace(k, sz, v, first, last, succ);
 } else {
 repl = succ;
 }
-r = range_alloc(cr, k, sz, v, succ);
-repl = range_insert(repl, r);
-range_mark(first, succ);
+r = new range(this, k, sz, v, succ);
+repl = insert(repl, r);
+mark(first, succ);
 if (prev)
 assert(!MARKED(prev->next[0]));
 if (__sync_bool_compare_and_swap(&(prev->next[0]), first ? first : succ, repl)) {
 release(prev->lock);
-range_freen(first, last); // put on delayed list before unlocking
-range_unlockn(first, last);
+freen(first, last); // put on delayed list before unlocking
+unlockn(first, last);
 } else {
 assert(0);
 }
 // cprintf("crange_add(0x%lx,0x%lx):\n", k, sz); crange_print(cr, 1);
-crange_check(cr, nullptr);
+this->check(nullptr);
 gc_end_epoch();
 }
-int
-crange_foreach(struct crange *cr, int (*cb)(struct range *r, void *), void *st)
+int crange::foreach(int (*cb)(range *r, void *), void *st)
 {
 struct range *e;
-assert(cr);
-for (e = WOMARK(cr->crange_head.next[0]); e; e = WOMARK(e->next[0])) {
+for (e = WOMARK(this->crange_head->next[0]); e; e = WOMARK(e->next[0])) {
 if (!cb(e, st))
 return 0;
 }
...
#pragma once
class crange;
class range : public rcu_freed {
public:
u64 key;
u64 size;
void *value;
int curlevel; // the current levels it appears on
int nlevel; // the number of levels this range should appear
crange *cr; // the crange this range is part of
struct range** next; // one next pointer per level
struct spinlock *lock; // on separate cache line?
range(crange *cr, u64 k, u64 sz, void *v, struct range *n, int nlevel);
~range();
virtual void do_gc() {
delete this;
}
void print(int l);
void free_delayed();
void dec_ref(void);
int lockif(range *e);
} __mpalign__;
class crange {
range *crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
crange_check(struct range *absent);
crange_replace(u64, u64, void*, range*, range*, range*);
public:
int nlevel; // number of levels in the crange skip list
crange(int nlevel);
~crange(void);
void del(u64 k, u64 sz);
void add(u64 k, u64 sz, void *v);
range* search(u64 k, u64 sz, int mod);
int foreach(int (*cb)(range *r, void *), void *st);
void print(int);
void check(struct range *absent);
int del_index(range *p0, range **e, int l);
void add_index(int l, range *e, range *p1, range *s1);
int lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr);
int find_and_lock(u64 k, u64 sz, range **p0, range **f0, range **l0, range **s0);
};
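For orientation, an illustrative sketch (not code from this commit) of the class-based interface that replaces the old crange_* functions; cr and some_value are made-up names, and the arguments are the same [k, k+sz) ranges used throughout crange.cc:

crange *cr = new crange(10);           // skip list with 10 index levels
cr->add(0x1000, 0x2000, some_value);   // map [0x1000, 0x3000) to some_value
range *r = cr->search(0x1800, 1, 0);   // find the range containing 0x1800
cr->del(0x1000, 0x2000);               // remove the mapping again
delete cr;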
@@ -28,7 +28,7 @@ filealloc(void)
 struct file*
 filedup(struct file *f)
 {
-if (__sync_fetch_and_add(&f->ref, 1) < 1)
+if (f->ref++ < 1)
 panic("filedup");
 return f;
 }
@@ -37,7 +37,7 @@ filedup(struct file *f)
 void
 fileclose(struct file *f)
 {
-if (subfetch(&f->ref, 1) > 0)
+if (--f->ref > 0)
 return;
 if(f->type == file::FD_PIPE)
...
 #include "cpputil.hh"
 #include "ns.hh"
 #include "gc.hh"
+#include "atomic.hh"
 u64 namehash(const strbuf<DIRSIZ>&);
 struct file {
 enum { FD_NONE, FD_PIPE, FD_INODE, FD_SOCKET } type;
-int ref; // reference count
+atomic<int> ref; // reference count
 char readable;
 char writable;
@@ -23,9 +24,9 @@ struct inode : public rcu_freed {
 u32 dev; // Device number
 u32 inum; // Inode number
 u32 gen; // Generation number
-int ref; // Reference count
+atomic<int> ref; // Reference count
 int flags; // I_BUSY, I_VALID
-int readbusy;
+atomic<int> readbusy;
 struct condvar cv;
 struct spinlock lock;
 char lockname[16];
...
@@ -249,10 +249,10 @@ iget(u32 dev, u32 inum)
 ip = ins->lookup(mkpair(dev, inum));
 if (ip) {
 // tricky: first bump ref, then check free flag
-__sync_fetch_and_add(&ip->ref, 1);
+ip->ref++;
 if (ip->flags & I_FREE) {
 gc_end_epoch();
-__sync_sub_and_fetch(&ip->ref, 1);
+ip->ref--;
 goto retry;
 }
 gc_end_epoch();
@@ -339,7 +339,7 @@ iget(u32 dev, u32 inum)
 struct inode*
 idup(struct inode *ip)
 {
-__sync_fetch_and_add(&ip->ref, 1);
+ip->ref++;
 return ip;
 }
@@ -357,7 +357,7 @@ ilock(struct inode *ip, int writer)
 while(ip->flags & (I_BUSYW | (writer ? I_BUSYR : 0)))
 cv_sleep(&ip->cv, &ip->lock);
 ip->flags |= I_BUSYR | (writer ? I_BUSYW : 0);
-__sync_fetch_and_add(&ip->readbusy, 1);
+ip->readbusy++;
 release(&ip->lock);
 if((ip->flags & I_VALID) == 0)
@@ -372,7 +372,7 @@ iunlock(struct inode *ip)
 panic("iunlock");
 acquire(&ip->lock);
-int lastreader = __sync_sub_and_fetch(&ip->readbusy, 1);
+int lastreader = (--ip->readbusy);
 ip->flags &= ~(I_BUSYW | ((lastreader==0) ? I_BUSYR : 0));
 cv_wakeup(&ip->cv);
 release(&ip->lock);
@@ -382,7 +382,7 @@ iunlock(struct inode *ip)
 void
 iput(struct inode *ip)
 {
-if(__sync_sub_and_fetch(&ip->ref, 1) == 0) {
+if(--ip->ref == 0) {
 if (ip->nlink)
 return;
 acquire(&ip->lock);
@@ -407,7 +407,7 @@ iput(struct inode *ip)
 }
 ip->flags |= (I_BUSYR | I_BUSYW);
-__sync_fetch_and_add(&ip->readbusy, 1);
+ip->readbusy++;
 release(&ip->lock);
...
@@ -11,6 +11,8 @@ extern "C" {
 }
 #include "ns.hh"
+#include "atomic.hh"
 extern u64 proc_hash(const u32&);
 extern xns<u32, proc*, proc_hash> *xnspid;
@@ -41,7 +43,7 @@ static struct gc_state {
   struct condvar cv;
   headinfo delayed[NEPOCH];
   headinfo tofree[NEPOCH];
-  int ndelayed;
+  atomic<int> ndelayed;
   int min_epoch;
   int nrun;
   int nfree;
@@ -156,13 +158,13 @@ gc_delayfreelist(void)
 void
 gc_delayed(rcu_freed *e)
 {
-  __sync_fetch_and_add(&gc_state[mycpu()->id].ndelayed, 1);
+  gc_state[mycpu()->id].ndelayed++;
   pushcli();
   int c = mycpu()->id;
   u64 myepoch = myproc()->epoch;
   u64 minepoch = gc_state[c].delayed[myepoch % NEPOCH].epoch;
   if (gc_debug)
-    cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid, global_epoch, gc_state[c].ndelayed);
+    cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid, global_epoch, gc_state[c].ndelayed.load());
   if (myepoch != minepoch) {
     cprintf("%d: myepoch %lu minepoch %lu\n", myproc()->pid, myepoch, minepoch);
     panic("gc_delayed_int");
@@ -226,7 +228,7 @@ gc_worker(void *x)
   for (i = gc_state[mycpu()->id].min_epoch; i < global-2; i++) {
     int nfree = gc_free_tofreelist(&(gc_state[mycpu()->id].tofree[i%NEPOCH].head), i);
     gc_state[mycpu()->id].tofree[i%NEPOCH].epoch += NEPOCH;
-    __sync_fetch_and_sub(&gc_state[mycpu()->id].ndelayed, nfree);
+    gc_state[mycpu()->id].ndelayed -= nfree;
     if (0 && nfree > 0) {
       cprintf("%d: epoch %lu freed %d\n", mycpu()->id, i, nfree);
     }
...
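One detail in the gc_delayed hunk above: once ndelayed is an atomic<int> it can no longer be handed to cprintf through varargs (the atomic wrapper is not copyable), which is why the debug print now reads it with .load(). A small standalone illustration with std::atomic and printf, names invented:

#include <atomic>
#include <cstdio>

static std::atomic<int> ndelayed{0};

void report() {
  // An atomic<int> object has a deleted copy constructor, so it cannot be
  // passed through a C-style "..." argument list; read the plain int first.
  std::printf("ndelayed %d\n", ndelayed.load());
}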
@@ -55,38 +55,6 @@ void consoleintr(int(*)(void));
 #define assert(c) if (!(c)) { cprintf("%s:%d: ", __FILE__, __LINE__); panic("assertion failure"); }
-// crange.c
-#if __cplusplus
-#include "gc.hh"
-struct range : public rcu_freed {
-  u64 key;
-  u64 size;
-  void *value;
-  int curlevel;          // the current levels it appears on
-  int nlevel;            // the number of levels this range should appear
-  struct crange *cr;     // the crange this range is part of
-  struct range** next;   // one next pointer per level
-  struct spinlock *lock; // on separate cache line?
-  range() : rcu_freed("range") {}
-  void do_gc();
-} __mpalign__;
-struct crange {
-  int nlevel;                // number of levels in the crange skip list
-  struct range crange_head;  // a crange skip list starts with a sentinel range (key 0, sz 0)
-};
-struct crange* crange_alloc(int nlevel);
-void crange_free(struct crange *cr);
-void crange_del(struct crange *cr, u64 k, u64 sz);
-void crange_add(struct crange *cr, u64 k, u64 sz, void *v);
-struct range* crange_search(struct crange *cr, u64 k, u64 sz, int mod);
-int crange_foreach(struct crange *crk, int (*f)(struct range *r, void *st), void *st);
-void crange_print(struct crange *cr, int);
-#endif
 // e1000.c
 extern int e1000irq;
 extern int e1000init;
...
@@ -69,7 +69,7 @@ sys_kernlet(int fd, size_t count, off_t off)
   if(f->type != file::FD_INODE)
     return -1;
-  fetchadd(&f->ip->ref, 1);
+  f->ip->ref++;
   w = pread_allocwork(f->ip, myproc()->vmap->kshared, count, off);
   if (w == NULL) {
     iput(f->ip);
...
@@ -582,7 +582,7 @@ fork(int flags)
     }
   } else {
     np->vmap = myproc()->vmap;
-    __sync_fetch_and_add(&np->vmap->ref, 1);
+    np->vmap->ref++;
   }
   np->brk = myproc()->brk;
...
@@ -18,4 +18,5 @@ typedef pme_t pml4e_t;
 typedef s64 ssize_t;
 typedef u64 size_t;
 typedef u64 off_t;
+typedef s64 ptrdiff_t;
 #endif
@@ -14,6 +14,7 @@ extern "C" {
 #include "vm.hh"
 #include "gc.hh"
+#include "crange.hh"
 static void vmap_free(void *p);
@@ -22,7 +23,7 @@ enum { vm_debug = 0 };
 static void
 vmn_decref(struct vmnode *n)
 {
-  if(subfetch(&n->ref, 1) == 0)
+  if(--n->ref == 0)
     vmn_free(n);
 }
@@ -84,7 +85,7 @@ vmn_allocpg(u64 npg)
 void
 vmap_decref(struct vmap *m)
 {
-  if(subfetch(&m->ref, 1) == 0)
+  if(--m->ref == 0)
     vmap_free(m);
 }
@@ -114,7 +115,7 @@ vmap_alloc(void)
     return 0;
   }
 #ifdef TREE
-  m->cr = crange_alloc(10);
+  m->cr = new crange(10);
   if (m->cr == 0)
     return 0;
 #endif
@@ -216,7 +217,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
   if (vm_debug)
     cprintf("pagefault: err 0x%x va 0x%lx type %d ref %lu pid %d\n",
-            err, va, m->va_type, m->n->ref, myproc()->pid);
+            err, va, m->va_type, m->n->ref.load(), myproc()->pid);
   if (m->va_type == COW && (err & FEC_WR)) {
     if (pagefault_wcow(vmap, va, pte, m, npg) < 0) {
@@ -313,10 +314,10 @@ struct state {
 };
 static int
-vmap_free_vma(struct range *r, void *st)
+vmap_free_vma(range *r, void *st)
 {
   delete (vma *) r->value;
-  crange_del(r->cr, r->key, r->size);
+  r->cr->del(r->key, r->size);
   return 1;
 }
@@ -324,8 +325,8 @@ static void
 vmap_free(void *p)
 {
   struct vmap *m = (struct vmap *) p;
-  crange_foreach(m->cr, vmap_free_vma, NULL);
-  crange_free(m->cr);
+  m->cr->foreach(vmap_free_vma, NULL);
+  delete m->cr;
   ksfree(slab_kshared, m->kshared);
   freevm(m->pml4);
   m->pml4 = 0;
@@ -346,7 +347,7 @@ vmap_lookup(struct vmap *m, uptr start, uptr len)
   if(start + len < start)
     panic("vmap_lookup bad len");
-  struct range *r = crange_search(m->cr, start, len, 0);
+  range *r = m->cr->search(start, len, 0);
   if (r != 0) {
     struct vma *e = (struct vma *) (r->value);
     if (e->va_end <= e->va_start)
@@ -379,8 +380,8 @@ vmap_insert(struct vmap *m, struct vmnode *n, uptr va_start)
   e->va_start = va_start;
   e->va_end = va_start + len;
   e->n = n;
-  __sync_fetch_and_add(&n->ref, 1);
-  crange_add(m->cr, e->va_start, len, (void *) e);
+  n->ref++;
+  m->cr->add(e->va_start, len, (void *) e);
   release(&m->lock);
   return 0;
 }
@@ -410,8 +411,8 @@ vmap_copy_vma(struct range *r, void *_st)
   if(c->n == 0) {
     return 0;
   }
-  __sync_fetch_and_add(&c->n->ref, 1);
-  crange_add(st->cr, c->va_start, c->va_end - c->va_start, (void *) c);
+  c->n->ref++;
+  st->cr->add(c->va_start, c->va_end - c->va_start, (void *) c);
   return 1;
 }
@@ -427,7 +428,7 @@ vmap_copy(struct vmap *m, int share)
   st.share = share;
   st.pml4 = m->pml4;
   st.cr = c->cr;
-  if (!crange_foreach(m->cr, vmap_copy_vma, &st)) {
+  if (!m->cr->foreach(vmap_copy_vma, &st)) {
     vmap_free(c);
     release(&m->lock);
     return 0;
@@ -445,7 +446,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len)
 {
   acquire(&m->lock);
   uptr va_end = va_start + len;
-  struct range *r = crange_search(m->cr, va_start, len, 0);
+  struct range *r = m->cr->search(va_start, len, 0);
   if (r == 0)
     panic("no vma?");
   struct vma *e = (struct vma *) r->value;
@@ -454,7 +455,7 @@ vmap_remove(struct vmap *m, uptr va_start, u64 len)
     release(&m->lock);
     return -1;
   }
-  crange_del(m->cr, va_start, len);
+  m->cr->del(va_start, len);
   gc_delayed(e);
   release(&m->lock);
   return 0;
...
 #define TREE
 #include "gc.hh"
+#include "atomic.hh"
 // A mapping of a chunk of an address space to
 // a specific memory object.
@@ -27,7 +28,7 @@ struct vma : public rcu_freed {
 struct vmnode {
   u64 npages;
   char *page[128];
-  u64 ref;
+  atomic<u64> ref;
   enum vmntype type;
   struct inode *ip;
   u64 offset;
@@ -44,7 +45,7 @@ struct vmap {
   struct vma* e[16];
 #endif
   struct spinlock lock; // serialize map/lookup/unmap
-  u64 ref;
+  atomic<u64> ref;
   u64 alloc;
   pml4e_t *pml4;  // Page table
   char *kshared;
...