xv6-public

Commit dbc11d57
Authored Jan 25, 2012 by Frans Kaashoek

Copy user-level changes back into kernel

Parent: e0554fc2

Showing 1 changed file with 39 additions and 11 deletions.

gc.c  +39  -11

gc.c @ dbc11d57
@@ -13,7 +13,7 @@
 // a process maintain an epoch (>= global_epoch)
 // one gc thread and state (e.g., NEPOCH delaylists and one tofreelists) per core
 // a process add to its core epoch's delayed freelist on delayed_free
-// a gcc performs two jobs:
+// a gcc thread performs two jobs:
 // 1. one gcc thread perform step 1:
 //      updates a thread's epoch, when not in an epoch
 //      compute min over all process's epochs, and sets global_epoch to min
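The comment block above describes the scheme this file implements: every process carries an epoch that is at least global_epoch, delayed frees are appended to the current epoch's per-core list, and the gc thread both refreshes the epoch of threads that are not inside an epoch and frees lists that every thread has moved past. Below is a minimal single-threaded user-space model of that bookkeeping; gc_item, delayed[], thread_epoch[], and the two-thread count are invented for the sketch, and none of the kernel's locking or per-core state is modeled.

/* Sketch only: models the per-epoch delayed freelists and the "min over
 * all epochs" rule from the comment above.  Not the kernel's API. */
#include <stdio.h>
#include <stdlib.h>

#define NEPOCH  4
#define NTHREAD 2

struct gc_item {
  unsigned long epoch;                   /* epoch in which the item was retired */
  struct gc_item *next;
};

static unsigned long global_epoch = NEPOCH - 2;
static unsigned long thread_epoch[NTHREAD];      /* each "process" epoch >= global */
static struct gc_item *delayed[NEPOCH];          /* per-epoch delayed freelists */

/* delayed_free: retire an item onto the current epoch's list. */
static void delayed_free(struct gc_item *it) {
  it->epoch = global_epoch;
  it->next = delayed[it->epoch % NEPOCH];
  delayed[it->epoch % NEPOCH] = it;
}

/* gc job 1: the minimum epoch over all threads bounds what may be freed. */
static unsigned long gc_min_epoch(void) {
  unsigned long min = thread_epoch[0];
  for (int i = 1; i < NTHREAD; i++)
    if (thread_epoch[i] < min)
      min = thread_epoch[i];
  return min;
}

/* gc job 2: free lists retired more than two epochs before the minimum;
 * no thread can still hold a reference into them. */
static void gc_free_old(unsigned long min) {
  for (unsigned long e = 0; e + 2 < min; e++) {
    struct gc_item *it = delayed[e % NEPOCH];
    delayed[e % NEPOCH] = 0;
    while (it) {
      struct gc_item *nxt = it->next;
      free(it);
      it = nxt;
    }
  }
}

int main(void) {
  thread_epoch[0] = thread_epoch[1] = global_epoch;
  delayed_free(malloc(sizeof(struct gc_item)));
  /* pretend both threads left their read-side sections and advanced */
  thread_epoch[0] = thread_epoch[1] = global_epoch + 3;
  gc_free_old(gc_min_epoch());
  printf("global %lu, min %lu\n", global_epoch, gc_min_epoch());
  return 0;
}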
@@ -24,6 +24,8 @@
 enum { gc_debug = 0 };
 
+#define NGC 10000
+
 struct gc {
   u64 epoch;
   struct gc *next;
@@ -48,6 +50,9 @@ static struct gc_state {
   struct gc tofree[NEPOCH];
   int ndelayed;
   int min_epoch;
+  int nrun;
+  int nfree;
+  int cnt;
 } __mpalign__ gc_state[NCPU] __mpalign__;
 static struct { struct spinlock l __mpalign__; } gc_lock;
@@ -58,7 +63,7 @@ gc_alloc()
 {
   struct gc *r = kmalloc(sizeof(struct gc));
   assert(r);
-  gc_state[mycpu()->id].ndelayed++;
+  __sync_fetch_and_add(&gc_state[mycpu()->id].ndelayed, 1);
   return r;
 }
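The change in gc_alloc() above replaces a plain increment of the per-core ndelayed counter with the GCC builtin __sync_fetch_and_add, so concurrent updates cannot be lost. A stand-alone illustration of the difference follows; the counter is only a stand-in for gc_state[...].ndelayed, and the thread and iteration counts are arbitrary.

/* Sketch only: four threads bump one shared counter.  With the atomic
 * builtin the final value is exact; a plain "ndelayed++" may lose updates. */
#include <pthread.h>
#include <stdio.h>

static long ndelayed;

static void *worker(void *arg) {
  (void)arg;
  for (int i = 0; i < 1000000; i++)
    __sync_fetch_and_add(&ndelayed, 1);     /* atomic read-modify-write */
  return 0;
}

int main(void) {
  pthread_t t[4];
  for (int i = 0; i < 4; i++)
    pthread_create(&t[i], 0, worker, 0);
  for (int i = 0; i < 4; i++)
    pthread_join(t[i], 0);
  printf("%ld\n", ndelayed);                /* always 4000000 here */
  return 0;
}

Compile with -pthread; swapping the builtin for ndelayed++ typically prints a smaller, run-to-run varying total.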
@@ -66,11 +71,14 @@ static void *
 gc_min(void *vkey, void *v, void *arg){
   u64 *min_epoch_p = arg;
   struct proc *p = (struct proc *) v;
+  // Some threads may never call begin/end_epoch(), and never update
+  // p->epoch, so gc_thread does it for them. XXX get rid off lock?
   acquire(&p->gc_epoch_lock);
   if (p->epoch_depth == 0) {
     p->epoch = global_epoch;
   }
   release(&p->gc_epoch_lock);
+  // cprintf("gc_min %d(%s): %lu %ld\n", p->pid, p->name, p->epoch, p->epoch_depth);
   if (*min_epoch_p > p->epoch) {
     *min_epoch_p = p->epoch;
   }
@@ -147,8 +155,8 @@ static void
 gc_move_to_tofree(u64 epoch)
 {
   if (gc_debug)
-    cprintf("%d: free epoch %ld\n", mycpu()->id, epoch);
-  for (int c = 0; c < NCPU; c++) {
+    cprintf("%d: move epoch %ld to tofreelist\n", mycpu()->id, epoch);
+  for (int c = 0; c < ncpu; c++) {
     gc_move_to_tofree_cpu(c, epoch);
   }
   int ok = __sync_bool_compare_and_swap(&global_epoch, epoch, epoch+1);
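gc_move_to_tofree() above advances global_epoch with __sync_bool_compare_and_swap, so if two gc threads race to retire the same epoch only one of them moves the counter. A sketch of that pattern follows; the variable is a local stand-in, not the kernel's global_epoch itself.

/* Sketch only: compare-and-swap advances the epoch exactly once per value. */
#include <stdio.h>

static unsigned long global_epoch = 5;

/* Try to advance from `epoch` to `epoch + 1`; returns nonzero on success. */
static int advance_epoch(unsigned long epoch) {
  return __sync_bool_compare_and_swap(&global_epoch, epoch, epoch + 1);
}

int main(void) {
  int first  = advance_epoch(5);    /* succeeds: 5 -> 6 */
  int second = advance_epoch(5);    /* fails: the value is already 6 */
  printf("first=%d second=%d epoch=%lu\n", first, second, global_epoch);
  return 0;
}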
@@ -166,12 +174,15 @@ gc_delayfreelist(void)
   u64 global = global_epoch;
   u64 min = global;
   // make that global_epoch doesn't run into a core's min_epoch
-  for (int c = 0; c < NCPU; c++) {
+  for (int c = 0; c < ncpu; c++) {
     int w = gc_state[c].min_epoch + NEPOCH - 1;
     if (w < min) {
       min = w;
     }
   }
+  if (gc_debug) {
+    cprintf("(%d,%d) (%s): min %lu global %lu\n", myproc()->cpuid, myproc()->pid, myproc()->name, min, global);
+  }
   myproc()->epoch_depth++;  // ensure ns_enumate's call to gc_begin_epoch doesn't have sideeffects
   ns_enumerate(nspid, gc_min, &min);
   myproc()->epoch_depth--;
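The loop above caps the candidate minimum at each core's min_epoch + NEPOCH - 1, so global_epoch can never get far enough ahead to wrap into a slot of a core's circular delayed[]/tofree[] arrays that is still in use. A tiny illustration of the clamp with made-up per-core values:

/* Sketch only: the slowest core bounds how far the global epoch may move. */
#include <stdio.h>

#define NEPOCH 4
#define NCORE  3

int main(void) {
  unsigned long global = 12, min = 12;
  unsigned long min_epoch[NCORE] = { 11, 8, 12 };   /* per-core progress */
  for (int c = 0; c < NCORE; c++) {
    unsigned long w = min_epoch[c] + NEPOCH - 1;    /* core c's ceiling */
    if (w < min)
      min = w;
  }
  printf("global=%lu clamped min=%lu\n", global, min);  /* min=11, from core 1 */
  return 0;
}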
@@ -229,7 +240,9 @@ gc_delayed2(int a1, u64 a2, void (*dofree)(int,u64))
 void
 gc_start(void)
 {
-  cv_wakeup(&gc_state[mycpu()->id].cv);
+  // XXX hack?
+  if (gc_state[mycpu()->id].cnt++ % 10000 == 0)
+    cv_wakeup(&gc_state[mycpu()->id].cv);
 }
 
 void
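gc_start() above now uses the new per-core cnt field to signal the gc worker only on every 10000th call rather than on every call. A sketch of that throttling pattern; the counter, threshold, and wake_worker() below are placeholders, not kernel names.

/* Sketch only: wake a worker on the 1st call and then every Nth call after. */
#include <stdio.h>

#define WAKE_EVERY 10000

static unsigned long cnt;
static unsigned long wakeups;

static void wake_worker(void) { wakeups++; }   /* stands in for cv_wakeup() */

static void gc_start_sketch(void) {
  if (cnt++ % WAKE_EVERY == 0)                 /* calls 1, 10001, 20001, ... */
    wake_worker();
}

int main(void) {
  for (int i = 0; i < 25000; i++)
    gc_start_sketch();
  printf("calls=25000 wakeups=%lu\n", wakeups);  /* prints wakeups=3 */
  return 0;
}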
@@ -252,8 +265,19 @@ gc_end_epoch(void)
   acquire(&myproc()->gc_epoch_lock);
   --myproc()->epoch_depth;
   release(&myproc()->gc_epoch_lock);
+  if (myproc()->epoch_depth == 0 && gc_state[mycpu()->id].ndelayed > NGC)
+    gc_start();
+}
+
+void
+gc_dumpstat(void)
+{
+  for (int i = 0; i < ncpu; i++) {
+    cprintf("worker %d: %d %d\n", i, gc_state[i].nrun, gc_state[i].nfree);
+  }
 }
 
 static void
 gc_worker(void *x)
 {
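With this hunk, gc_end_epoch() kicks the collector only when the thread leaves its outermost epoch and the core has accumulated more than NGC delayed frees, and the new gc_dumpstat() reports each worker's nrun/nfree counters. A minimal single-threaded sketch of the trigger condition; all names below are local to the sketch.

/* Sketch only: start gc on the outermost epoch exit, under memory pressure. */
#include <stdio.h>

#define NGC 10000

static int epoch_depth;   /* nesting depth of begin/end_epoch on this thread */
static int ndelayed;      /* delayed frees accumulated on this core */
static int gc_started;

static void gc_start_sketch(void) { gc_started = 1; }

static void gc_end_epoch_sketch(void) {
  --epoch_depth;
  if (epoch_depth == 0 && ndelayed > NGC)   /* outermost exit + pressure */
    gc_start_sketch();
}

int main(void) {
  epoch_depth = 1;
  ndelayed = NGC + 1;
  gc_end_epoch_sketch();
  printf("gc_started=%d\n", gc_started);    /* prints 1 */
  return 0;
}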
@@ -268,14 +292,17 @@ gc_worker(void *x)
     acquire(&wl);
     cv_sleep(&gc_state[mycpu()->id].cv, &wl);
     release(&wl);
+    gc_state[mycpu()->id].nrun++;
     u64 global = global_epoch;
+    myproc()->epoch = global_epoch;  // move the gc thread to next epoch
     for (i = gc_state[mycpu()->id].min_epoch; i < global-2; i++) {
       int nfree = gc_free_tofreelist(&(gc_state[mycpu()->id].tofree[i%NEPOCH].next), i);
       gc_state[mycpu()->id].tofree[i%NEPOCH].epoch += NEPOCH;
-      if (gc_debug && nfree > 0) {
+      __sync_fetch_and_sub(&gc_state[mycpu()->id].ndelayed, nfree);
+      if (0 && nfree > 0) {
         cprintf("%d: epoch %lu freed %d\n", mycpu()->id, i, nfree);
       }
+      gc_state[mycpu()->id].nfree += nfree;
     }
     gc_state[mycpu()->id].min_epoch = i;
     gc_delayfreelist();
@@ -286,17 +313,17 @@ void
 initprocgc(struct proc *p)
 {
   p->epoch = global_epoch;
+  p->epoch_depth = 0;
   initlock(&p->gc_epoch_lock, "per process gc_lock");
 }
 
 void
 initgc(void)
 {
   initlock(&gc_lock.l, "gc");
   global_epoch = NEPOCH - 2;
-  for (int i = 0; i < NCPU; i++) {
+  for (int i = 0; i < ncpu; i++) {
     for (int j = 0; j < NEPOCH; j++) {
       gc_state[i].delayed[j].epoch = j;
       gc_state[i].tofree[j].epoch = j;
@@ -304,7 +331,7 @@ initgc(void)
     initcondvar(&gc_state[i].cv, "gc_cv");
   }
-  for (u32 c = 0; c < NCPU; c++) {
+  for (u32 c = 0; c < ncpu; c++) {
     struct proc *gcp;
 
     gcp = threadalloc(gc_worker, NULL);
@@ -315,6 +342,7 @@ initgc(void)
     gcp->cpuid = c;
     gcp->cpu_pin = 1;
     acquire(&gcp->lock);
+    gcp->state = RUNNABLE;
     addrun(gcp);
     release(&gcp->lock);
   }
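Taken together, the initgc() hunks start one gc worker per booted CPU (the loops now run to ncpu instead of the compile-time NCPU) and pin each worker to its core before making it runnable. A rough user-space analogue of that setup, assuming a Linux/glibc environment; pthread_attr_setaffinity_np, the 64-thread cap, and the worker body are assumptions of the sketch, not part of the patch.

/* Sketch only: create one worker per online CPU and pin it to that CPU. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void *gc_worker_sketch(void *arg) {
  printf("gc worker pinned to core %ld\n", (long)arg);
  return 0;
}

int main(void) {
  long ncpu = sysconf(_SC_NPROCESSORS_ONLN);   /* plays the role of ncpu */
  if (ncpu > 64)
    ncpu = 64;
  pthread_t t[64];
  for (long c = 0; c < ncpu; c++) {
    pthread_attr_t attr;
    cpu_set_t set;
    pthread_attr_init(&attr);
    CPU_ZERO(&set);
    CPU_SET((int)c, &set);
    pthread_attr_setaffinity_np(&attr, sizeof(set), &set);  /* pin to core c */
    pthread_create(&t[c], &attr, gc_worker_sketch, (void *)c);
    pthread_attr_destroy(&attr);
  }
  for (long c = 0; c < ncpu; c++)
    pthread_join(t[c], 0);
  return 0;
}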