Commit e2751a96 authored by Frans Kaashoek

Merge branch 'scale-amd64' of ssh://amsterdam.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

Conflicts: crange.cc kernel.h
......@@ -13,25 +13,32 @@ MTRACE ?= $(QEMU)
HW ?= qemu
O = o.$(HW)
ifndef CONFIG_MK
CC = $(TOOLPREFIX)gcc
ifdef USE_CLANG
CC = $(TOOLPREFIX)clang
CXX = $(TOOLPREFIX)clang++
CXXFLAGS = -Wno-delete-non-virtual-dtor -Wno-gnu-designator
CFLAGS = -no-integrated-as
else
CC = $(TOOLPREFIX)gcc
CXX = $(TOOLPREFIX)g++
CXXFLAGS =
CFLAGS =
endif
LD = $(TOOLPREFIX)ld
NM = $(TOOLPREFIX)nm
OBJCOPY = $(TOOLPREFIX)objcopy
STRIP = $(TOOLPREFIX)strip
COMFLAGS := -static -fno-builtin -fno-strict-aliasing -O2 -Wall \
COMFLAGS = -static -fno-builtin -fno-strict-aliasing -O2 -Wall \
-g -MD -m64 -Werror -fms-extensions -mno-sse \
-mcmodel=large -mno-red-zone -I$(QEMUSRC) -fno-omit-frame-pointer \
-DHW_$(HW) -include param.h -include compiler.h -DXV6 \
-Wno-gnu-designator
-DHW_$(HW) -include param.h -include compiler.h -DXV6
COMFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
CFLAGS := $(COMFLAGS) -std=c99
CXXFLAGS := $(COMFLAGS) -std=c++0x -Wno-sign-compare -fno-exceptions -fno-rtti
ASFLAGS = -m64 -gdwarf-2 -MD
LDFLAGS += -m elf_x86_64
CFLAGS := $(COMFLAGS) -std=c99 $(CFLAGS)
CXXFLAGS := $(COMFLAGS) -std=c++0x -Wno-sign-compare -fno-exceptions -fno-rtti $(CXXFLAGS)
ASFLAGS = -m64 -gdwarf-2 -MD
LDFLAGS = -m elf_x86_64
OBJS = \
bio.o \
......@@ -91,6 +98,7 @@ UPROGS= \
_forkexectree \
_forkexecbench \
_forktree \
_login \
_ls \
_mapbench \
_maptest \
......
......@@ -36,3 +36,13 @@
download, make, make install gmp-5.0.4, mpfr-3.1.0, and mpc-0.9
for mpfr and mpc: ./configure --with-gmp=/usr/local
for gcc: ../configure --target=x86_64-jos-elf --with-gmp=/usr/local/ --with-mpfr=/usr/local --enable-languages=c,c++ --without-headers --disable-nls
* clang
- Version 3.0 or greater is required to build xv6
- http://llvm.org/docs/GettingStarted.html
$ git clone http://llvm.org/git/llvm.git
$ (cd llvm/tools && git clone http://llvm.org/git/clang.git)
$ CC=gcc CXX=g++ ./configure --prefix=[PREFIX] \
--enable-targets=x86_64 --enable-optimized
$ CC=gcc CXX=g++ make && make install
#pragma once
#define _GLIBCXX_VISIBILITY(x)
#define _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define _GLIBCXX_END_NAMESPACE_VERSION
#define _GLIBCXX_BEGIN_EXTERN_C extern "C" {
#define _GLIBCXX_END_EXTERN_C }
#define __glibcxx_assert(x)
#include "atomic_base.h"
#include "atomic_2.h"
template<class T>
struct atomic : public std::__atomic2::__atomic_base<T> {
atomic() = default;
~atomic() = default;
atomic(const atomic&) = delete;
atomic& operator=(const atomic&) = delete;
atomic& operator=(const atomic&) volatile = delete;
constexpr atomic(T v) : std::__atomic2::__atomic_base<T>(v) {}
using std::__atomic2::__atomic_base<T>::operator T;
using std::__atomic2::__atomic_base<T>::operator=;
};
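The wrapper above gives kernel code a freestanding atomic<T> with the familiar std::atomic-style interface (assignment, conversion, ++/--, compound assignment), backed by the __atomic2 base class that follows. A minimal usage sketch, assuming a hosted toolchain where <atomic> stands in for the kernel wrapper (the names below are illustrative, not from the commit):

// Hedged sketch: what atomic<int> usage in kernel code looks like,
// compiled here against <atomic> since the kernel headers are not
// available standalone.
#include <atomic>
#include <cstdio>

int main()
{
  std::atomic<int> ref(1);   // same shape as `atomic<int> ref;` in file.hh
  ref++;                     // atomic fetch_add under the hood
  int now = --ref;           // atomic sub_and_fetch, returns the new value
  std::printf("ref=%d now=%d\n", ref.load(), now);   // ref=1 now=1
  return 0;
}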
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_2.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1
#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
namespace __atomic2
{
/// atomic_flag
struct atomic_flag : public __atomic_flag_base
{
atomic_flag() = default;
~atomic_flag() = default;
atomic_flag(const atomic_flag&) = delete;
atomic_flag& operator=(const atomic_flag&) = delete;
atomic_flag& operator=(const atomic_flag&) volatile = delete;
// Conversion to ATOMIC_FLAG_INIT.
atomic_flag(bool __i): __atomic_flag_base({ __i }) { }
bool
test_and_set(memory_order __m = memory_order_seq_cst)
{
// Redundant synchronize if built-in for lock is a full barrier.
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
return __sync_lock_test_and_set(&_M_i, 1);
}
bool
test_and_set(memory_order __m = memory_order_seq_cst) volatile
{
// Redundant synchronize if built-in for lock is a full barrier.
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
return __sync_lock_test_and_set(&_M_i, 1);
}
void
clear(memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_consume);
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_lock_release(&_M_i);
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
}
void
clear(memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_consume);
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_lock_release(&_M_i);
if (__m != memory_order_acquire && __m != memory_order_acq_rel)
__sync_synchronize();
}
};
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
//
// atomic_bool bool
// atomic_char char
// atomic_schar signed char
// atomic_uchar unsigned char
// atomic_short short
// atomic_ushort unsigned short
// atomic_int int
// atomic_uint unsigned int
// atomic_long long
// atomic_ulong unsigned long
// atomic_llong long long
// atomic_ullong unsigned long long
// atomic_char16_t char16_t
// atomic_char32_t char32_t
// atomic_wchar_t wchar_t
//
// NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
// 8 bytes, since that is what GCC built-in functions for atomic
// memory access expect.
template<typename _ITp>
struct __atomic_base
{
private:
typedef _ITp __int_type;
__int_type _M_i;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __int_type convertible to _M_i.
constexpr __atomic_base(__int_type __i): _M_i (__i) { }
operator __int_type() const
{ return load(); }
operator __int_type() const volatile
{ return load(); }
__int_type
operator=(__int_type __i)
{
store(__i);
return __i;
}
__int_type
operator=(__int_type __i) volatile
{
store(__i);
return __i;
}
__int_type
operator++(int)
{ return fetch_add(1); }
__int_type
operator++(int) volatile
{ return fetch_add(1); }
__int_type
operator--(int)
{ return fetch_sub(1); }
__int_type
operator--(int) volatile
{ return fetch_sub(1); }
__int_type
operator++()
{ return __sync_add_and_fetch(&_M_i, 1); }
__int_type
operator++() volatile
{ return __sync_add_and_fetch(&_M_i, 1); }
__int_type
operator--()
{ return __sync_sub_and_fetch(&_M_i, 1); }
__int_type
operator--() volatile
{ return __sync_sub_and_fetch(&_M_i, 1); }
__int_type
operator+=(__int_type __i)
{ return __sync_add_and_fetch(&_M_i, __i); }
__int_type
operator+=(__int_type __i) volatile
{ return __sync_add_and_fetch(&_M_i, __i); }
__int_type
operator-=(__int_type __i)
{ return __sync_sub_and_fetch(&_M_i, __i); }
__int_type
operator-=(__int_type __i) volatile
{ return __sync_sub_and_fetch(&_M_i, __i); }
__int_type
operator&=(__int_type __i)
{ return __sync_and_and_fetch(&_M_i, __i); }
__int_type
operator&=(__int_type __i) volatile
{ return __sync_and_and_fetch(&_M_i, __i); }
__int_type
operator|=(__int_type __i)
{ return __sync_or_and_fetch(&_M_i, __i); }
__int_type
operator|=(__int_type __i) volatile
{ return __sync_or_and_fetch(&_M_i, __i); }
__int_type
operator^=(__int_type __i)
{ return __sync_xor_and_fetch(&_M_i, __i); }
__int_type
operator^=(__int_type __i) volatile
{ return __sync_xor_and_fetch(&_M_i, __i); }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__int_type __i, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __i;
else
{
// write_mem_barrier();
_M_i = __i;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_i = __i;
else
{
// write_mem_barrier();
_M_i = __i;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
__int_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__int_type __ret = _M_i;
__sync_synchronize();
return __ret;
}
__int_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__int_type __ret = _M_i;
__sync_synchronize();
return __ret;
}
__int_type
exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __i);
}
__int_type
exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_i, __i);
}
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2)
{ return compare_exchange_strong(__i1, __i2, __m1, __m2); }
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2) volatile
{ return compare_exchange_strong(__i1, __i2, __m1, __m2); }
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_weak(__i1, __i2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_weak(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_weak(__i1, __i2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__int_type __i1o = __i1;
__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
// Assume extra stores (of same value) allowed in true case.
__i1 = __i1n;
return __i1o == __i1n;
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__int_type __i1o = __i1;
__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
// Assume extra stores (of same value) allowed in true case.
__i1 = __i1n;
return __i1o == __i1n;
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst)
{
return compare_exchange_strong(__i1, __i2, __m,
__calculate_memory_order(__m));
}
bool
compare_exchange_strong(__int_type& __i1, __int_type __i2,
memory_order __m = memory_order_seq_cst) volatile
{
return compare_exchange_strong(__i1, __i2, __m,
__calculate_memory_order(__m));
}
__int_type
fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_i, __i); }
__int_type
fetch_add(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_i, __i); }
__int_type
fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_i, __i); }
__int_type
fetch_sub(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_i, __i); }
__int_type
fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_and(&_M_i, __i); }
__int_type
fetch_and(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_and(&_M_i, __i); }
__int_type
fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_or(&_M_i, __i); }
__int_type
fetch_or(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_or(&_M_i, __i); }
__int_type
fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_xor(&_M_i, __i); }
__int_type
fetch_xor(__int_type __i,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_xor(&_M_i, __i); }
};
/// Partial specialization for pointer types.
template<typename _PTp>
struct __atomic_base<_PTp*>
{
private:
typedef _PTp* __pointer_type;
__pointer_type _M_p;
public:
__atomic_base() = default;
~__atomic_base() = default;
__atomic_base(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) = delete;
__atomic_base& operator=(const __atomic_base&) volatile = delete;
// Requires __pointer_type convertible to _M_p.
constexpr __atomic_base(__pointer_type __p): _M_p (__p) { }
operator __pointer_type() const
{ return load(); }
operator __pointer_type() const volatile
{ return load(); }
__pointer_type
operator=(__pointer_type __p)
{
store(__p);
return __p;
}
__pointer_type
operator=(__pointer_type __p) volatile
{
store(__p);
return __p;
}
__pointer_type
operator++(int)
{ return fetch_add(1); }
__pointer_type
operator++(int) volatile
{ return fetch_add(1); }
__pointer_type
operator--(int)
{ return fetch_sub(1); }
__pointer_type
operator--(int) volatile
{ return fetch_sub(1); }
__pointer_type
operator++()
{ return fetch_add(1) + 1; }
__pointer_type
operator++() volatile
{ return fetch_add(1) + 1; }
__pointer_type
operator--()
{ return fetch_sub(1) -1; }
__pointer_type
operator--() volatile
{ return fetch_sub(1) -1; }
__pointer_type
operator+=(ptrdiff_t __d)
{ return fetch_add(__d) + __d; }
__pointer_type
operator+=(ptrdiff_t __d) volatile
{ return fetch_add(__d) + __d; }
__pointer_type
operator-=(ptrdiff_t __d)
{ return fetch_sub(__d) - __d; }
__pointer_type
operator-=(ptrdiff_t __d) volatile
{ return fetch_sub(__d) - __d; }
bool
is_lock_free() const
{ return true; }
bool
is_lock_free() const volatile
{ return true; }
void
store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
void
store(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
__glibcxx_assert(__m != memory_order_acquire);
__glibcxx_assert(__m != memory_order_acq_rel);
__glibcxx_assert(__m != memory_order_consume);
if (__m == memory_order_relaxed)
_M_p = __p;
else
{
// write_mem_barrier();
_M_p = __p;
if (__m == memory_order_seq_cst)
__sync_synchronize();
}
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
load(memory_order __m = memory_order_seq_cst) const volatile
{
__glibcxx_assert(__m != memory_order_release);
__glibcxx_assert(__m != memory_order_acq_rel);
__sync_synchronize();
__pointer_type __ret = _M_p;
__sync_synchronize();
return __ret;
}
__pointer_type
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
__pointer_type
exchange(__pointer_type __p,
memory_order __m = memory_order_seq_cst) volatile
{
// XXX built-in assumes memory_order_acquire.
return __sync_lock_test_and_set(&_M_p, __p);
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2)
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
bool
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
memory_order __m1, memory_order __m2) volatile
{
__glibcxx_assert(__m2 != memory_order_release);
__glibcxx_assert(__m2 != memory_order_acq_rel);
__glibcxx_assert(__m2 <= __m1);
__pointer_type __p1o = __p1;
__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
// Assume extra stores (of same value) allowed in true case.
__p1 = __p1n;
return __p1o == __p1n;
}
__pointer_type
fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_add(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_add(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
{ return __sync_fetch_and_sub(&_M_p, __d); }
__pointer_type
fetch_sub(ptrdiff_t __d,
memory_order __m = memory_order_seq_cst) volatile
{ return __sync_fetch_and_sub(&_M_p, __d); }
};
} // namespace __atomic2
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
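The compare_exchange_strong implementation above wraps __sync_val_compare_and_swap and reports success by comparing the observed value with the expected one. A minimal sketch, not part of the commit, of the usual CAS retry loop built on that same builtin (atomic_max is a hypothetical helper for illustration):

// Hedged sketch: lock-free "atomic maximum" via a CAS retry loop,
// using the same GCC builtin that compare_exchange_strong uses above.
#include <cstdio>

int atomic_max(int *p, int v)
{
  int old = *p;
  // Retry until we install v or the stored value is already >= v.
  while (old < v) {
    int seen = __sync_val_compare_and_swap(p, old, v);
    if (seen == old)
      break;        // we installed v
    old = seen;     // someone else changed it; re-check against v
  }
  return old;       // value observed before any update
}

int main()
{
  int x = 3;
  atomic_max(&x, 7);
  std::printf("x=%d\n", x);  // prints 7
  return 0;
}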
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1
#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
/**
* @defgroup atomics Atomics
*
* Components for performing atomic operations.
* @{
*/
/// Enumeration for memory_order
typedef enum memory_order
{
memory_order_relaxed,
memory_order_consume,
memory_order_acquire,
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
} memory_order;
inline memory_order
__calculate_memory_order(memory_order __m)
{
const bool __cond1 = __m == memory_order_release;
const bool __cond2 = __m == memory_order_acq_rel;
memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
return __mo2;
}
void
atomic_thread_fence(memory_order);
void
atomic_signal_fence(memory_order);
/// kill_dependency
template<typename _Tp>
inline _Tp
kill_dependency(_Tp __y)
{
_Tp __ret(__y);
return __ret;
}
/**
* @brief Base type for atomic_flag.
*
* Base type is POD with data, allowing atomic_flag to derive from
* it and meet the standard layout type requirement. In addition to
* compatibilty with a C interface, this allows different
* implementations of atomic_flag to use the same atomic operation
* functions, via a standard conversion to the __atomic_flag_base
* argument.
*/
_GLIBCXX_BEGIN_EXTERN_C
struct __atomic_flag_base
{
bool _M_i;
};
_GLIBCXX_END_EXTERN_C
#define ATOMIC_FLAG_INIT { false }
// Base types for atomics.
//
// Three nested namespaces for atomic implementation details.
//
// The nested namespace inlined into std:: is determined by the value
// of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
// ATOMIC_*_LOCK_FREE macros.
//
// 0 == __atomic0 == Never lock-free
// 1 == __atomic1 == Best available, sometimes lock-free
// 2 == __atomic2 == Always lock-free
namespace __atomic0
{
struct atomic_flag;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic2
{
struct atomic_flag;
template<typename _IntTp>
struct __atomic_base;
}
namespace __atomic1
{
using __atomic2::atomic_flag;
using __atomic0::__atomic_base;
}
/// Lock-free Property
#if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
&& defined(_GLIBCXX_ATOMIC_BUILTINS_4) && defined(_GLIBCXX_ATOMIC_BUILTINS_8)
# define _GLIBCXX_ATOMIC_PROPERTY 2
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic2
#elif defined(_GLIBCXX_ATOMIC_BUILTINS_1)
# define _GLIBCXX_ATOMIC_PROPERTY 1
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic1
#else
# define _GLIBCXX_ATOMIC_PROPERTY 0
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic0
#endif
#define ATOMIC_CHAR_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR16_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR32_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_WCHAR_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_SHORT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
inline namespace _GLIBCXX_ATOMIC_NAMESPACE { }
#define ATOMIC_VAR_INIT(_VI) { _VI }
template<typename _Tp>
struct atomic;
template<typename _Tp>
struct atomic<_Tp*>;
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif
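The atomic_flag API defined earlier (test_and_set with acquire semantics, clear with release semantics) is exactly the shape needed for a test-and-set spinlock. A hedged sketch, not from the commit, using the same __sync builtins directly; tiny_lock is a hypothetical name for illustration:

// Hedged sketch: a minimal test-and-set lock built on the builtins that
// atomic_flag::test_and_set()/clear() above expand to.
#include <cstdio>

struct tiny_lock {
  bool locked = false;

  void acquire() {
    // Spin until the previous value was false (i.e. we took the lock).
    while (__sync_lock_test_and_set(&locked, true))
      ;  // busy-wait
  }
  void release() {
    __sync_lock_release(&locked);  // mirrors atomic_flag::clear()
  }
};

int main()
{
  tiny_lock l;
  l.acquire();
  std::printf("in critical section\n");
  l.release();
  return 0;
}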
......@@ -135,7 +135,7 @@ bread(u32 dev, u64 sector, int writer)
if(!(b->flags & B_VALID))
iderw(b);
if (writer && !origwriter) {
__sync_fetch_and_and(&b->flags, ~B_BUSY);
b->flags &= ~B_BUSY;
cv_wakeup(&b->cv);
}
return b;
......@@ -159,7 +159,7 @@ brelse(struct buf *b, int writer)
if (writer) {
if((b->flags & B_BUSY) == 0)
panic("brelse");
__sync_fetch_and_and(&b->flags, ~B_BUSY);
b->flags &= ~B_BUSY;
cv_wakeup(&b->cv);
}
// rcu_begin_read() happens in bread
......
#include "gc.hh"
#include "atomic.hh"
struct buf : public rcu_freed {
int flags;
atomic<int> flags;
u32 dev;
u64 sector;
struct buf *prev; // LRU cache list
......
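The bread()/brelse() hunks above drop the explicit __sync_fetch_and_and call because buf::flags is now an atomic<int>: the plain `b->flags &= ~B_BUSY;` goes through __atomic_base::operator&=, which is itself an atomic read-modify-write. A hedged sketch, not part of the commit, showing the builtin that compound assignment expands to (fake_buf and the flag values are illustrative stand-ins):

// Hedged sketch: why `flags &= ~B_BUSY` stays atomic once flags is atomic<int>.
#include <cstdio>

enum { B_BUSY = 0x1, B_VALID = 0x2 };  // stand-ins for the real buf flags

struct fake_buf { int flags; };

int main()
{
  fake_buf b{ B_BUSY | B_VALID };

  // atomic<int>::operator&= (see __atomic_base above) boils down to this
  // GCC builtin, which performs the read-modify-write atomically:
  __sync_and_and_fetch(&b.flags, ~B_BUSY);

  std::printf("flags after clear: %#x\n", b.flags);  // prints 0x2 (B_VALID)
  return 0;
}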
......@@ -3,19 +3,27 @@ extern "C" {
#include "kernel.h"
}
#include "cpputil.hh"
void *
operator new(unsigned long nbytes)
{
return kmalloc(nbytes);
}
void *
operator new(unsigned long nbytes, void *buf)
{
return buf;
}
void
operator delete(void *p)
{
kmfree(p);
}
extern "C" void
void
__cxa_pure_virtual(void)
{
panic("__cxa_pure_virtual");
......
......@@ -33,3 +33,10 @@ mkpair(const A &a, const B &b)
{
return pair<A, B>(a, b);
}
/* C++ runtime */
void *operator new(unsigned long nbytes);
void *operator new(unsigned long nbytes, void *buf);
void operator delete(void *p);
extern "C" void __cxa_pure_virtual(void);
......@@ -10,6 +10,7 @@ extern "C" {
}
#include "gc.hh"
#include "crange.hh"
#include "cpputil.hh"
//
// Concurrent atomic range operations using skip lists. An insert may split an
......
......@@ -28,7 +28,7 @@ filealloc(void)
struct file*
filedup(struct file *f)
{
if (__sync_fetch_and_add(&f->ref, 1) < 1)
if (f->ref++ < 1)
panic("filedup");
return f;
}
......@@ -37,7 +37,7 @@ filedup(struct file *f)
void
fileclose(struct file *f)
{
if (subfetch(&f->ref, 1) > 0)
if (--f->ref > 0)
return;
if(f->type == file::FD_PIPE)
......
#include "cpputil.hh"
#include "ns.hh"
#include "gc.hh"
#include "atomic.hh"
u64 namehash(const strbuf<DIRSIZ>&);
struct file {
enum { FD_NONE, FD_PIPE, FD_INODE, FD_SOCKET } type;
int ref; // reference count
atomic<int> ref; // reference count
char readable;
char writable;
......
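Likewise, once file::ref is an atomic<int>, the post-increment in filedup() and the pre-decrement in fileclose() above are atomic read-modify-writes through the operators defined in __atomic_base, so the old __sync_fetch_and_add/subfetch calls are no longer needed. A hedged sketch, not from the commit, of the builtins those operators expand to (fake_file is an illustrative stand-in):

// Hedged sketch: reference counting on an int via the builtins behind
// atomic<int>'s ++ and -- operators.
#include <cstdio>

struct fake_file { int ref; };

int main()
{
  fake_file f{ 1 };

  // f->ref++ corresponds to __sync_fetch_and_add (returns the old count):
  int old = __sync_fetch_and_add(&f.ref, 1);

  // --f->ref corresponds to __sync_sub_and_fetch (returns the new count):
  int now = __sync_sub_and_fetch(&f.ref, 1);

  std::printf("old=%d now=%d\n", old, now);  // old=1 now=1
  return 0;
}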
......@@ -55,8 +55,6 @@ void consoleintr(int(*)(void));
#define assert(c) if (!(c)) { cprintf("%s:%d: ", __FILE__, __LINE__); panic("assertion failure"); }
// crange.c
// e1000.c
extern int e1000irq;
extern int e1000init;
......
#include "types.h"
#include "user.h"
static const char *
readpw(void)
{
static char pw[64];
for (int i = 0; i < sizeof(pw); i++) {
int r = read(0, &pw[i], 1);
if (r != 1)
return 0;
if (pw[i] == '\r') {
pw[i] = 0;
} else if (pw[i] == '\n') {
pw[i] = 0;
return pw;
}
}
return 0;
}
int
main(void)
{
const char *pw;
printf(1, "password: ");
pw = readpw();
if (pw && !strcmp(pw, "xv6")) {
static const char *argv[] = { "/sh", 0 };
exec(argv[0], argv);
}
exit();
}
......@@ -8,7 +8,7 @@
#if SPINLOCK_DEBUG
#define NHASH 10
#else
#define NHASH 100
#define NHASH 30
#endif
class scoped_gc_epoch {
......
......@@ -46,16 +46,18 @@ main(void)
continue;
}
if (pid == 0) {
static const char *argv[] = { "sh", 0 };
static const char *argv[] = { "/login", 0 };
close(0);
close(1);
close(2);
dup(ss);
dup(ss);
dup(ss);
exec("sh", argv);
exec(argv[0], argv);
exit();
}
close(ss);
wait();
printf(1, "telnetd: connection closed\n");
}
}
......@@ -18,4 +18,5 @@ typedef pme_t pml4e_t;
typedef s64 ssize_t;
typedef u64 size_t;
typedef u64 off_t;
typedef s64 ptrdiff_t;
#endif