Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
X
xv6-public
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
问题
0
问题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
银宸时代
OS Lab Group
奖励实验
xv6-public
提交
d61e3535
提交
d61e3535
2月 14, 2012
创建
作者:
Nickolai Zeldovich
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
c++11 atomic template from libstdc++
上级
db8baf3d
隐藏空白字符变更
内嵌
并排
正在显示
8 个修改的文件
包含
878 行增加
和
6 行删除
+878
-6
atomic.hh
atomic.hh
+25
-0
atomic_2.h
atomic_2.h
+669
-0
atomic_base.h
atomic_base.h
+175
-0
bio.cc
bio.cc
+2
-2
buf.hh
buf.hh
+2
-1
file.cc
file.cc
+2
-2
file.hh
file.hh
+2
-1
types.h
types.h
+1
-0
没有找到文件。
atomic.hh
0 → 100644
浏览文件 @
d61e3535
#pragma once

// Stub out the libstdc++ internal configuration macros so the vendored
// headers (atomic_base.h / atomic_2.h) compile inside the kernel, where
// the real libstdc++ build machinery is unavailable.
#define _GLIBCXX_VISIBILITY(x)
#define _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define _GLIBCXX_END_NAMESPACE_VERSION
#define _GLIBCXX_BEGIN_EXTERN_C extern "C" {
#define _GLIBCXX_END_EXTERN_C }
#define __glibcxx_assert(x)   // assertions compiled out in the kernel

#include "atomic_base.h"
#include "atomic_2.h"

// Minimal std::atomic-like wrapper over the always-lock-free (__atomic2)
// implementation vendored from libstdc++.  Copying is disabled, matching
// std::atomic; construction from a plain T and the base's conversion and
// assignment operators are re-exported so atomic<T> reads/writes like T.
template <class T>
struct atomic : public std::__atomic2::__atomic_base<T>
{
  atomic() = default;
  ~atomic() = default;
  atomic(const atomic&) = delete;
  atomic& operator=(const atomic&) = delete;
  atomic& operator=(const atomic&) volatile = delete;

  // Allow e.g. atomic<int> x(0);
  constexpr atomic(T v) : std::__atomic2::__atomic_base<T>(v) {}

  // Implicit conversion (seq_cst load) and assignment (seq_cst store)
  // come from the base class.
  using std::__atomic2::__atomic_base<T>::operator T;
  using std::__atomic2::__atomic_base<T>::operator=;
};
atomic_2.h
0 → 100644
浏览文件 @
d61e3535
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_2.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1
#pragma GCC system_header
namespace
std
_GLIBCXX_VISIBILITY
(
default
)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
namespace
__atomic2
{
/// atomic_flag: a lock-free boolean flag built on the GCC __sync builtins.
struct atomic_flag : public __atomic_flag_base
{
  atomic_flag() = default;
  ~atomic_flag() = default;
  atomic_flag(const atomic_flag&) = delete;
  atomic_flag& operator=(const atomic_flag&) = delete;
  atomic_flag& operator=(const atomic_flag&) volatile = delete;

  // Conversion to ATOMIC_FLAG_INIT.
  atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

  // Atomically set the flag and return its previous value.
  // __sync_lock_test_and_set is only an acquire barrier, so a leading
  // full barrier is emitted for every order stronger than acquire.
  bool
  test_and_set(memory_order __m = memory_order_seq_cst)
  {
    // Redundant synchronize if built-in for lock is a full barrier.
    if (__m != memory_order_acquire && __m != memory_order_acq_rel)
      __sync_synchronize();
    return __sync_lock_test_and_set(&_M_i, 1);
  }

  bool
  test_and_set(memory_order __m = memory_order_seq_cst) volatile
  {
    // Redundant synchronize if built-in for lock is a full barrier.
    if (__m != memory_order_acquire && __m != memory_order_acq_rel)
      __sync_synchronize();
    return __sync_lock_test_and_set(&_M_i, 1);
  }

  // Clear the flag.  Acquire-flavored orders are invalid for a clear
  // (asserted below); a trailing full barrier upgrades the release
  // semantics of __sync_lock_release for the stronger orders.
  void
  clear(memory_order __m = memory_order_seq_cst)
  {
    __glibcxx_assert(__m != memory_order_consume);
    __glibcxx_assert(__m != memory_order_acquire);
    __glibcxx_assert(__m != memory_order_acq_rel);

    __sync_lock_release(&_M_i);
    if (__m != memory_order_acquire && __m != memory_order_acq_rel)
      __sync_synchronize();
  }

  void
  clear(memory_order __m = memory_order_seq_cst) volatile
  {
    __glibcxx_assert(__m != memory_order_consume);
    __glibcxx_assert(__m != memory_order_acquire);
    __glibcxx_assert(__m != memory_order_acq_rel);

    __sync_lock_release(&_M_i);
    if (__m != memory_order_acquire && __m != memory_order_acq_rel)
      __sync_synchronize();
  }
};
/// Base class for atomic integrals.
//
// For each of the integral types, define atomic_[integral type] struct
//
// atomic_bool     bool
// atomic_char     char
// atomic_schar    signed char
// atomic_uchar    unsigned char
// atomic_short    short
// atomic_ushort   unsigned short
// atomic_int      int
// atomic_uint     unsigned int
// atomic_long     long
// atomic_ulong    unsigned long
// atomic_llong    long long
// atomic_ullong   unsigned long long
// atomic_char16_t char16_t
// atomic_char32_t char32_t
// atomic_wchar_t  wchar_t
//
// NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
// 8 bytes, since that is what GCC built-in functions for atomic
// memory access expect.
template<typename _ITp>
  struct __atomic_base
  {
  private:
    typedef _ITp __int_type;

    __int_type _M_i;   // the value; all access goes through __sync builtins

  public:
    __atomic_base() = default;
    ~__atomic_base() = default;
    __atomic_base(const __atomic_base&) = delete;
    __atomic_base& operator=(const __atomic_base&) = delete;
    __atomic_base& operator=(const __atomic_base&) volatile = delete;

    // Requires __int_type convertible to _M_i.
    constexpr __atomic_base(__int_type __i): _M_i(__i) { }

    // Implicit conversion performs a seq_cst load.
    operator __int_type() const
    { return load(); }

    operator __int_type() const volatile
    { return load(); }

    // Assignment performs a seq_cst store; like std::atomic it returns
    // the stored value, not *this.
    __int_type
    operator=(__int_type __i)
    {
      store(__i);
      return __i;
    }

    __int_type
    operator=(__int_type __i) volatile
    {
      store(__i);
      return __i;
    }

    // Post-increment/decrement: atomic RMW returning the OLD value.
    __int_type
    operator++(int)
    { return fetch_add(1); }

    __int_type
    operator++(int) volatile
    { return fetch_add(1); }

    __int_type
    operator--(int)
    { return fetch_sub(1); }

    __int_type
    operator--(int) volatile
    { return fetch_sub(1); }

    // Pre-increment/decrement: atomic RMW returning the NEW value
    // (the __sync builtins imply a full barrier).
    __int_type
    operator++()
    { return __sync_add_and_fetch(&_M_i, 1); }

    __int_type
    operator++() volatile
    { return __sync_add_and_fetch(&_M_i, 1); }

    __int_type
    operator--()
    { return __sync_sub_and_fetch(&_M_i, 1); }

    __int_type
    operator--() volatile
    { return __sync_sub_and_fetch(&_M_i, 1); }

    // Compound assignments: atomic RMW returning the NEW value.
    __int_type
    operator+=(__int_type __i)
    { return __sync_add_and_fetch(&_M_i, __i); }

    __int_type
    operator+=(__int_type __i) volatile
    { return __sync_add_and_fetch(&_M_i, __i); }

    __int_type
    operator-=(__int_type __i)
    { return __sync_sub_and_fetch(&_M_i, __i); }

    __int_type
    operator-=(__int_type __i) volatile
    { return __sync_sub_and_fetch(&_M_i, __i); }

    __int_type
    operator&=(__int_type __i)
    { return __sync_and_and_fetch(&_M_i, __i); }

    __int_type
    operator&=(__int_type __i) volatile
    { return __sync_and_and_fetch(&_M_i, __i); }

    __int_type
    operator|=(__int_type __i)
    { return __sync_or_and_fetch(&_M_i, __i); }

    __int_type
    operator|=(__int_type __i) volatile
    { return __sync_or_and_fetch(&_M_i, __i); }

    __int_type
    operator^=(__int_type __i)
    { return __sync_xor_and_fetch(&_M_i, __i); }

    __int_type
    operator^=(__int_type __i) volatile
    { return __sync_xor_and_fetch(&_M_i, __i); }

    // __atomic2 is the always-lock-free implementation.
    bool
    is_lock_free() const
    { return true; }

    bool
    is_lock_free() const volatile
    { return true; }

    // Plain store; a trailing full barrier is added only for seq_cst.
    // Acquire-flavored orders are invalid for a store (asserted).
    void
    store(__int_type __i, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __i;
      else
        {
          // write_mem_barrier();
          _M_i = __i;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void
    store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __i;
      else
        {
          // write_mem_barrier();
          _M_i = __i;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    // Load bracketed by full barriers regardless of __m (conservative).
    // Release-flavored orders are invalid for a load (asserted).
    __int_type
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      __int_type __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    __int_type
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      __int_type __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    // Atomic swap; returns the previous value.
    __int_type
    exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __i);
    }

    __int_type
    exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __i);
    }

    // The weak CAS never fails spuriously in this implementation; it
    // simply forwards to the strong form.
    bool
    compare_exchange_weak(__int_type& __i1, __int_type __i2,
                          memory_order __m1, memory_order __m2)
    { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(__int_type& __i1, __int_type __i2,
                          memory_order __m1, memory_order __m2) volatile
    { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

    // Single-order overloads derive the failure order from __m.
    bool
    compare_exchange_weak(__int_type& __i1, __int_type __i2,
                          memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__i1, __i2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(__int_type& __i1, __int_type __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__i1, __i2, __m,
                                   __calculate_memory_order(__m));
    }

    // Strong CAS: on failure the observed value is written back to
    // __i1, so the return value is (old expected == observed).
    bool
    compare_exchange_strong(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      __int_type __i1o = __i1;
      __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

      // Assume extra stores (of same value) allowed in true case.
      __i1 = __i1n;
      return __i1o == __i1n;
    }

    bool
    compare_exchange_strong(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      __int_type __i1o = __i1;
      __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

      // Assume extra stores (of same value) allowed in true case.
      __i1 = __i1n;
      return __i1o == __i1n;
    }

    bool
    compare_exchange_strong(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
    }

    // fetch_* operations: atomic RMW returning the OLD value.  The
    // memory_order argument is ignored; the __sync builtins are always
    // full barriers.
    __int_type
    fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_add(&_M_i, __i); }

    __int_type
    fetch_add(__int_type __i,
              memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __i); }

    __int_type
    fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_sub(&_M_i, __i); }

    __int_type
    fetch_sub(__int_type __i,
              memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __i); }

    __int_type
    fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_and(&_M_i, __i); }

    __int_type
    fetch_and(__int_type __i,
              memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_and(&_M_i, __i); }

    __int_type
    fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_or(&_M_i, __i); }

    __int_type
    fetch_or(__int_type __i,
             memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_or(&_M_i, __i); }

    __int_type
    fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_xor(&_M_i, __i); }

    __int_type
    fetch_xor(__int_type __i,
              memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_xor(&_M_i, __i); }
  };
/// Partial specialization for pointer types.
template<typename _PTp>
  struct __atomic_base<_PTp*>
  {
  private:
    typedef _PTp* __pointer_type;

    __pointer_type _M_p;   // the pointer; accessed via __sync builtins

  public:
    __atomic_base() = default;
    ~__atomic_base() = default;
    __atomic_base(const __atomic_base&) = delete;
    __atomic_base& operator=(const __atomic_base&) = delete;
    __atomic_base& operator=(const __atomic_base&) volatile = delete;

    // Requires __pointer_type convertible to _M_p.
    constexpr __atomic_base(__pointer_type __p): _M_p(__p) { }

    // Implicit conversion performs a seq_cst load.
    operator __pointer_type() const
    { return load(); }

    operator __pointer_type() const volatile
    { return load(); }

    // Assignment performs a seq_cst store; returns the stored pointer.
    __pointer_type
    operator=(__pointer_type __p)
    {
      store(__p);
      return __p;
    }

    __pointer_type
    operator=(__pointer_type __p) volatile
    {
      store(__p);
      return __p;
    }

    // Post-increment/decrement: return the OLD pointer.
    __pointer_type
    operator++(int)
    { return fetch_add(1); }

    __pointer_type
    operator++(int) volatile
    { return fetch_add(1); }

    __pointer_type
    operator--(int)
    { return fetch_sub(1); }

    __pointer_type
    operator--(int) volatile
    { return fetch_sub(1); }

    // Pre-increment/decrement: return the NEW pointer by adjusting the
    // value fetch_add/fetch_sub returned.
    __pointer_type
    operator++()
    { return fetch_add(1) + 1; }

    __pointer_type
    operator++() volatile
    { return fetch_add(1) + 1; }

    __pointer_type
    operator--()
    { return fetch_sub(1) - 1; }

    __pointer_type
    operator--() volatile
    { return fetch_sub(1) - 1; }

    __pointer_type
    operator+=(ptrdiff_t __d)
    { return fetch_add(__d) + __d; }

    __pointer_type
    operator+=(ptrdiff_t __d) volatile
    { return fetch_add(__d) + __d; }

    __pointer_type
    operator-=(ptrdiff_t __d)
    { return fetch_sub(__d) - __d; }

    __pointer_type
    operator-=(ptrdiff_t __d) volatile
    { return fetch_sub(__d) - __d; }

    // __atomic2 is the always-lock-free implementation.
    bool
    is_lock_free() const
    { return true; }

    bool
    is_lock_free() const volatile
    { return true; }

    // Plain store; trailing full barrier only for seq_cst.
    void
    store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_p = __p;
      else
        {
          // write_mem_barrier();
          _M_p = __p;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void
    store(__pointer_type __p,
          memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_p = __p;
      else
        {
          // write_mem_barrier();
          _M_p = __p;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    // Load bracketed by full barriers regardless of __m.
    __pointer_type
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      __pointer_type __ret = _M_p;
      __sync_synchronize();
      return __ret;
    }

    __pointer_type
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      __pointer_type __ret = _M_p;
      __sync_synchronize();
      return __ret;
    }

    // Atomic swap; returns the previous pointer.
    __pointer_type
    exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_p, __p);
    }

    __pointer_type
    exchange(__pointer_type __p,
             memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_p, __p);
    }

    // Strong CAS: on failure the observed pointer is written back to
    // __p1; returns whether the swap happened.
    bool
    compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      __pointer_type __p1o = __p1;
      __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);

      // Assume extra stores (of same value) allowed in true case.
      __p1 = __p1n;
      return __p1o == __p1n;
    }

    bool
    compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      __pointer_type __p1o = __p1;
      __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);

      // Assume extra stores (of same value) allowed in true case.
      __p1 = __p1n;
      return __p1o == __p1n;
    }

    // NOTE(review): the builtin applies __d to the pointer WITHOUT
    // scaling by sizeof(_PTp) (unlike ordinary pointer arithmetic);
    // this matches the upstream atomic_2.h, but confirm callers expect
    // that before relying on pointer fetch_add/fetch_sub.
    __pointer_type
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_add(&_M_p, __d); }

    __pointer_type
    fetch_add(ptrdiff_t __d,
              memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_p, __d); }

    __pointer_type
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_sub(&_M_p, __d); }

    __pointer_type
    fetch_sub(ptrdiff_t __d,
              memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_p, __d); }
  };
}
// namespace __atomic2
_GLIBCXX_END_NAMESPACE_VERSION
}
// namespace std
#endif
atomic_base.h
0 → 100644
浏览文件 @
d61e3535
// -*- C++ -*- header.
// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_base.h
* This is an internal header file, included by other library headers.
* Do not attempt to use it directly. @headername{atomic}
*/
#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1
#pragma GCC system_header
namespace
std
_GLIBCXX_VISIBILITY
(
default
)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
/**
* @defgroup atomics Atomics
*
* Components for performing atomic operations.
* @{
*/
/// Enumeration for memory_order
typedef
enum
memory_order
{
memory_order_relaxed
,
memory_order_consume
,
memory_order_acquire
,
memory_order_release
,
memory_order_acq_rel
,
memory_order_seq_cst
}
memory_order
;
// Derive the failure-path memory order from a success-path order for
// the single-order compare_exchange overloads: a failed CAS performs
// no store, so a release component is dropped (release -> relaxed,
// acq_rel -> acquire); every other order is usable unchanged.
inline memory_order
__calculate_memory_order(memory_order __m)
{
  if (__m == memory_order_acq_rel)
    return memory_order_acquire;
  if (__m == memory_order_release)
    return memory_order_relaxed;
  return __m;
}
void
atomic_thread_fence
(
memory_order
);
void
atomic_signal_fence
(
memory_order
);
/// kill_dependency: terminate a memory_order_consume dependency chain.
/// Returns a copy of its argument; because the result is a distinct
/// object, the compiler no longer tracks a data dependency through it.
template<typename _Tp>
  inline _Tp
  kill_dependency(_Tp __val)
  {
    // The intermediate copy (rather than returning __val directly) is
    // what breaks the dependency.
    _Tp __copy(__val);
    return __copy;
  }
/**
 * @brief Base type for atomic_flag.
 *
 * Base type is POD with data, allowing atomic_flag to derive from
 * it and meet the standard layout type requirement. In addition to
 * compatibility with a C interface, this allows different
 * implementations of atomic_flag to use the same atomic operation
 * functions, via a standard conversion to the __atomic_flag_base
 * argument.
 */
_GLIBCXX_BEGIN_EXTERN_C

struct __atomic_flag_base
{
  bool _M_i;   // raw flag storage; true == set
};

_GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { false }
// Base types for atomics.
//
// Three nested namespaces for atomic implementation details.
//
// The nested namespace inlined into std:: is determined by the value
// of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
// ATOMIC_*_LOCK_FREE macros.
//
// 0 == __atomic0 == Never lock-free
// 1 == __atomic1 == Best available, sometimes lock-free
// 2 == __atomic2 == Always lock-free
namespace
__atomic0
{
struct
atomic_flag
;
template
<
typename
_IntTp
>
struct
__atomic_base
;
}
namespace
__atomic2
{
struct
atomic_flag
;
template
<
typename
_IntTp
>
struct
__atomic_base
;
}
namespace
__atomic1
{
using
__atomic2
::
atomic_flag
;
using
__atomic0
::
__atomic_base
;
}
/// Lock-free Property
#if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
&& defined(_GLIBCXX_ATOMIC_BUILTINS_4) && defined(_GLIBCXX_ATOMIC_BUILTINS_8)
# define _GLIBCXX_ATOMIC_PROPERTY 2
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic2
#elif defined(_GLIBCXX_ATOMIC_BUILTINS_1)
# define _GLIBCXX_ATOMIC_PROPERTY 1
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic1
#else
# define _GLIBCXX_ATOMIC_PROPERTY 0
# define _GLIBCXX_ATOMIC_NAMESPACE __atomic0
#endif
#define ATOMIC_CHAR_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR16_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_CHAR32_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_WCHAR_T_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_SHORT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
#define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
inline
namespace
_GLIBCXX_ATOMIC_NAMESPACE
{
}
#define ATOMIC_VAR_INIT(_VI) { _VI }
template
<
typename
_Tp
>
struct
atomic
;
template
<
typename
_Tp
>
struct
atomic
<
_Tp
*>
;
// @} group atomics
_GLIBCXX_END_NAMESPACE_VERSION
}
// namespace std
#endif
bio.cc
浏览文件 @
d61e3535
...
@@ -135,7 +135,7 @@ bread(u32 dev, u64 sector, int writer)
...
@@ -135,7 +135,7 @@ bread(u32 dev, u64 sector, int writer)
if
(
!
(
b
->
flags
&
B_VALID
))
if
(
!
(
b
->
flags
&
B_VALID
))
iderw
(
b
);
iderw
(
b
);
if
(
writer
&&
!
origwriter
)
{
if
(
writer
&&
!
origwriter
)
{
__sync_fetch_and_and
(
&
b
->
flags
,
~
B_BUSY
)
;
b
->
flags
&=
~
B_BUSY
;
cv_wakeup
(
&
b
->
cv
);
cv_wakeup
(
&
b
->
cv
);
}
}
return
b
;
return
b
;
...
@@ -159,7 +159,7 @@ brelse(struct buf *b, int writer)
...
@@ -159,7 +159,7 @@ brelse(struct buf *b, int writer)
if
(
writer
)
{
if
(
writer
)
{
if
((
b
->
flags
&
B_BUSY
)
==
0
)
if
((
b
->
flags
&
B_BUSY
)
==
0
)
panic
(
"brelse"
);
panic
(
"brelse"
);
__sync_fetch_and_and
(
&
b
->
flags
,
~
B_BUSY
)
;
b
->
flags
&=
~
B_BUSY
;
cv_wakeup
(
&
b
->
cv
);
cv_wakeup
(
&
b
->
cv
);
}
}
// rcu_begin_read() happens in bread
// rcu_begin_read() happens in bread
...
...
buf.hh
浏览文件 @
d61e3535
#include "gc.hh"
#include "gc.hh"
#include "atomic.hh"
struct
buf
:
public
rcu_freed
{
struct
buf
:
public
rcu_freed
{
int
flags
;
atomic
<
int
>
flags
;
u32
dev
;
u32
dev
;
u64
sector
;
u64
sector
;
struct
buf
*
prev
;
// LRU cache list
struct
buf
*
prev
;
// LRU cache list
...
...
file.cc
浏览文件 @
d61e3535
...
@@ -28,7 +28,7 @@ filealloc(void)
...
@@ -28,7 +28,7 @@ filealloc(void)
struct
file
*
struct
file
*
filedup
(
struct
file
*
f
)
filedup
(
struct
file
*
f
)
{
{
if
(
__sync_fetch_and_add
(
&
f
->
ref
,
1
)
<
1
)
if
(
f
->
ref
++
<
1
)
panic
(
"filedup"
);
panic
(
"filedup"
);
return
f
;
return
f
;
}
}
...
@@ -37,7 +37,7 @@ filedup(struct file *f)
...
@@ -37,7 +37,7 @@ filedup(struct file *f)
void
void
fileclose
(
struct
file
*
f
)
fileclose
(
struct
file
*
f
)
{
{
if
(
subfetch
(
&
f
->
ref
,
1
)
>
0
)
if
(
--
f
->
ref
>
0
)
return
;
return
;
if
(
f
->
type
==
file
::
FD_PIPE
)
if
(
f
->
type
==
file
::
FD_PIPE
)
...
...
file.hh
浏览文件 @
d61e3535
#include "cpputil.hh"
#include "cpputil.hh"
#include "ns.hh"
#include "ns.hh"
#include "gc.hh"
#include "gc.hh"
#include "atomic.hh"
u64
namehash
(
const
strbuf
<
DIRSIZ
>&
);
u64
namehash
(
const
strbuf
<
DIRSIZ
>&
);
struct
file
{
struct
file
{
enum
{
FD_NONE
,
FD_PIPE
,
FD_INODE
,
FD_SOCKET
}
type
;
enum
{
FD_NONE
,
FD_PIPE
,
FD_INODE
,
FD_SOCKET
}
type
;
int
ref
;
// reference count
atomic
<
int
>
ref
;
// reference count
char
readable
;
char
readable
;
char
writable
;
char
writable
;
...
...
types.h
浏览文件 @
d61e3535
...
@@ -18,4 +18,5 @@ typedef pme_t pml4e_t;
...
@@ -18,4 +18,5 @@ typedef pme_t pml4e_t;
typedef
s64
ssize_t
;
typedef
s64
ssize_t
;
typedef
u64
size_t
;
typedef
u64
size_t
;
typedef
u64
off_t
;
typedef
u64
off_t
;
typedef
s64
ptrdiff_t
;
#endif
#endif
编写
预览
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论