Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
X
xv6-public
项目
项目
详情
活动
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
问题
0
问题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
银宸时代
OS Lab Group
奖励实验
xv6-public
提交
c271fbf2
提交
c271fbf2
4月 05, 2012
创建
作者:
Silas Boyd-Wickizer
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Try to separate the process scheduler from other scheduling-related code
上级
7c717316
隐藏空白字符变更
内嵌
并排
正在显示
7 个修改的文件
包含
205 行增加
和
174 行删除
+205
-174
kernel.hh
include/kernel.hh
+2
-0
proc.hh
include/proc.hh
+2
-4
sched.hh
include/sched.hh
+5
-4
console.cc
kernel/console.cc
+3
-0
idle.cc
kernel/idle.cc
+0
-1
proc.cc
kernel/proc.cc
+1
-3
sched.cc
kernel/sched.cc
+192
-162
没有找到文件。
include/kernel.hh
浏览文件 @
c271fbf2
...
...
@@ -189,6 +189,8 @@ void addrun(struct proc *);
void
sched
(
void
);
void
post_swtch
(
void
);
void
scheddump
(
void
);
int
steal
(
void
);
void
addrun
(
struct
proc
*
);
// spinlock.c
void
acquire
(
struct
spinlock
*
);
...
...
include/proc.hh
浏览文件 @
c271fbf2
...
...
@@ -6,6 +6,7 @@
#include "fs.h"
#include "file.hh"
#include "filetable.hh"
#include "sched.hh"
class
uwq
;
class
uwq_worker
;
...
...
@@ -44,7 +45,7 @@ typedef enum procstate {
}
procstate_t
;;
// Per-process state
struct
proc
:
public
rcu_freed
{
struct
proc
:
public
rcu_freed
,
public
sched_link
{
struct
vmap
*
vmap
;
// va -> vma
uwq
*
uwq
;
uwq_worker
*
worker
;
...
...
@@ -70,9 +71,6 @@ struct proc : public rcu_freed {
#if MTRACE
struct
mtrace_stacks
mtrace_stacks
;
#endif
struct
runq
*
runq
;
STAILQ_ENTRY
(
proc
)
runqlink
;
struct
condvar
*
oncv
;
// Where it is sleeping, for kill()
u64
cv_wakeup
;
// Wakeup time for this process
LIST_ENTRY
(
proc
)
cv_waiters
;
// Linked list of processes waiting for oncv
...
...
include/sched.hh
浏览文件 @
c271fbf2
void
delrun
(
struct
proc
*
);
struct
proc
*
schednext
(
void
);
int
steal
(
void
);
void
addrun
(
struct
proc
*
);
struct
sched_link
{
sched_link
*
prev
;
sched_link
*
next
;
};
kernel/console.cc
浏览文件 @
c271fbf2
...
...
@@ -271,6 +271,9 @@ consoleintr(int (*getc)(void))
case
C
(
'W'
):
wq_dump
();
break
;
case
C
(
'S'
):
scheddump
();
break
;
case
C
(
'F'
):
// kmem stats
kmemprint
();
break
;
...
...
kernel/idle.cc
浏览文件 @
c271fbf2
...
...
@@ -5,7 +5,6 @@
#include "condvar.h"
#include "proc.hh"
#include "cpu.hh"
#include "sched.hh"
#include "percpu.hh"
#include "wq.hh"
#include "uwq.hh"
...
...
kernel/proc.cc
浏览文件 @
c271fbf2
...
...
@@ -9,7 +9,6 @@
#include "cpu.hh"
#include "bits.hh"
#include "kmtrace.hh"
#include "sched.hh"
#include "kalloc.hh"
#include "vm.hh"
#include "ns.hh"
...
...
@@ -41,7 +40,7 @@ proc::proc(int npid) :
rcu_freed
(
"proc"
),
vmap
(
0
),
uwq
(
0
),
worker
(
0
),
kstack
(
0
),
pid
(
npid
),
parent
(
0
),
tf
(
0
),
context
(
0
),
killed
(
0
),
ftable
(
0
),
cwd
(
0
),
tsc
(
0
),
curcycles
(
0
),
cpuid
(
0
),
epoch
(
0
),
cpu_pin
(
0
),
runq
(
0
),
oncv
(
0
),
cv_wakeup
(
0
),
cpu_pin
(
0
),
oncv
(
0
),
cv_wakeup
(
0
),
user_fs_
(
0
),
unmap_tlbreq_
(
0
),
in_exec_
(
0
),
uaccess_
(
0
),
exception_inuse
(
0
),
state_
(
EMBRYO
)
{
...
...
@@ -51,7 +50,6 @@ proc::proc(int npid) :
memset
(
&
childq
,
0
,
sizeof
(
childq
));
memset
(
&
child_next
,
0
,
sizeof
(
child_next
));
memset
(
&
runqlink
,
0
,
sizeof
(
runqlink
));
memset
(
&
cv_waiters
,
0
,
sizeof
(
cv_waiters
));
memset
(
&
cv_sleep
,
0
,
sizeof
(
cv_sleep
));
memset
(
__cxa_eh_global
,
0
,
sizeof
(
__cxa_eh_global
));
...
...
kernel/sched.cc
浏览文件 @
c271fbf2
...
...
@@ -9,21 +9,145 @@
#include "cpu.hh"
#include "bits.hh"
#include "kmtrace.hh"
#include "sched.hh"
#include "vm.hh"
#include "wq.hh"
#include "percpu.hh"
#include "sperf.hh"
enum
{
sched_debug
=
0
};
enum
{
steal_nonexec
=
1
};
struct
runq
{
STAILQ_HEAD
(
queue
,
proc
)
q
;
struct
spinlock
lock
;
volatile
u64
len
__mpalign__
;
__padout__
;
// Per-CPU run queue: an intrusive circular doubly-linked list of
// procs (via their sched_link base), guarded by lock_.
class schedule {
public:
  schedule();
  void enq(proc* entry);       // append a proc at the tail
  proc* deq();                 // pop the proc at the head, or nullptr
  proc* steal(bool nonexec);   // unlink a stealable proc, or nullptr
  void dump();                 // print and reset the counters

  // Placement new so per-CPU instances can be constructed in
  // preallocated percpu storage.
  static void* operator new(unsigned long nbytes, schedule* buf) {
    assert(nbytes == sizeof(schedule));
    return buf;
  }

private:
  struct spinlock lock_;       // protects the list and ncansteal_
  sched_link head_;            // sentinel node of the circular list
  void sanity(void);           // DEBUG-only invariant check

  // Event counters, printed and reset by dump().
  struct {
    std::atomic<u64> enqs;
    std::atomic<u64> deqs;
    std::atomic<u64> steals;
  } stats_;

  // Count of queued procs for which cansteal(p, true) held when they
  // were enqueued/dequeued; lets remote steal() bail out without
  // touching lock_.
  u64 ncansteal_ __mpalign__;
};
percpu
<
schedule
>
schedule_
;
static
bool
cansteal
(
proc
*
p
,
bool
nonexec
);
// Initialize an empty run queue: the sentinel links to itself and all
// counters start at zero.
schedule::schedule(void)
{
  initlock(&lock_, "schedule::lock_", LOCKSTAT_SCHED);
  head_.next = &head_;
  head_.prev = &head_;
  ncansteal_ = 0;
  stats_.enqs = 0;
  stats_.deqs = 0;
  stats_.steals = 0;
}
static
struct
runq
runq
[
NCPU
]
__mpalign__
;
// Link p onto the tail of this run queue's circular list and update
// the steal bookkeeping.
void
schedule::enq(proc* p)
{
  sched_link* const link = p;

  scoped_acquire guard(&lock_);
  link->prev = head_.prev;
  link->next = &head_;
  link->prev->next = link;
  head_.prev = link;
  if (cansteal((proc*)link, true))
    ++ncansteal_;
  sanity();
  ++stats_.enqs;
}
// Unlink and return the proc at the head of the list, or nullptr when
// the queue is empty.
proc*
schedule::deq(void)
{
  scoped_acquire guard(&lock_);

  sched_link* const front = head_.next;
  if (front == &head_)
    return nullptr;   // only the sentinel remains: queue is empty

  front->next->prev = front->prev;
  front->prev->next = front->next;
  if (cansteal((proc*)front, true))
    --ncansteal_;
  sanity();
  ++stats_.deqs;
  return (proc*)front;
}
// Unlink and return a proc another CPU may run, or nullptr.  Uses
// tryacquire so a remote CPU never spins on this queue's lock, and the
// ncansteal_ fast path avoids even touching the lock when nothing on
// the queue was stealable.
proc*
schedule::steal(bool nonexec)
{
  if (ncansteal_ == 0 || !tryacquire(&lock_))
    return nullptr;
  for (sched_link* ptr = head_.next; ptr != &head_; ptr = ptr->next)
    if (cansteal((proc*)ptr, nonexec)) {
      // Unlink ptr and hand it to the caller.
      ptr->next->prev = ptr->prev;
      ptr->prev->next = ptr->next;
      ncansteal_--;
      sanity();
      stats_.steals++;
      release(&lock_);
      return (proc*)ptr;
    }
  release(&lock_);
  return nullptr;
}
void
schedule
::
dump
(
void
)
{
cprintf
(
"%lu %lu %lu
\n
"
,
stats_
.
enqs
.
load
(),
stats_
.
deqs
.
load
(),
stats_
.
steals
.
load
());
stats_
.
enqs
=
0
;
stats_
.
deqs
=
0
;
stats_
.
steals
=
0
;
}
// DEBUG-only invariant check: ncansteal_ must equal the number of
// queued procs for which cansteal(p, true) currently holds.  Called by
// enq/deq/steal with lock_ held.
void
schedule::sanity(void)
{
#if DEBUG
  u64 n = 0;

  for (sched_link* ptr = head_.next; ptr != &head_; ptr = ptr->next)
    if (cansteal((proc*)ptr, true))
      n++;

  if (n != ncansteal_)
    panic("schedule::sanity: %lu != %lu", n, ncansteal_);
#endif
}
// A proc is fair game for migration when it is runnable, not pinned to
// a CPU, old enough (curcycles past VICTIMAGE), and — unless nonexec
// is set — currently in exec.
static bool
cansteal(proc* p, bool nonexec)
{
  if (p->get_state() != RUNNABLE)
    return false;
  if (p->cpu_pin)
    return false;
  if (!(p->in_exec_ || nonexec))
    return false;
  return p->curcycles != 0 && p->curcycles > VICTIMAGE;
}
void
post_swtch
(
void
)
...
...
@@ -35,14 +159,67 @@ post_swtch(void)
wqcrit_trywork
();
}
// Try to steal one runnable proc from another CPU's run queue and move
// it onto ours.  Returns 1 on success, 0 otherwise.  Interrupts are
// disabled for the duration (pushcli/popcli).
//
// Fix: index the victim queue as (i + mycpu()->id) % ncpu, as the
// pre-existing runq-based steal() did.  The unoffset schedule_[i]
// indexing meant CPU 0's queue was never a victim, and any CPU with
// id >= 1 wastefully scanned (and could "steal" from) its own queue.
int
steal(void)
{
  struct proc* victim;
  int r = 0;

  pushcli();
  // First pass restricts victims to procs in exec; when steal_nonexec
  // is set a second pass considers everything else too.
  for (int nonexec = 0; nonexec < (steal_nonexec ? 2 : 1); nonexec++) {
    for (int i = 1; i < ncpu; i++) {
      // Offset by our CPU id so every remote queue (including CPU 0's)
      // is scanned and our own queue never is.
      victim = schedule_[(i + mycpu()->id) % ncpu].steal(nonexec);
      if (victim != nullptr) {
        acquire(&victim->lock);
        // Re-check under p->lock: the proc's state may have changed
        // between the queue scan and now.
        if (victim->get_state() == RUNNABLE && !victim->cpu_pin &&
            victim->curcycles != 0 && victim->curcycles > VICTIMAGE)
        {
          victim->curcycles = 0;
          victim->cpuid = mycpu()->id;
          addrun(victim);
          release(&victim->lock);
          r = 1;
          goto found;
        }
        // Not stealable after all; schedule::steal already unlinked
        // it, so requeue it if it is still runnable.
        if (victim->get_state() == RUNNABLE) {
          addrun(victim);
        }
        release(&victim->lock);
      }
    }
  }
 found:
  popcli();
  return r;
}
// Print the per-CPU scheduler counters: one line per run queue,
// prefixed with the CPU number.
void
scheddump(void)
{
  for (int cpu = 0; cpu < NCPU; cpu++) {
    cprintf("%u ", cpu);
    schedule_[cpu].dump();
  }
}
// Mark p RUNNABLE and enqueue it on the run queue of the CPU it is
// assigned to (p->cpuid).  NOTE(review): the replaced runq-based
// version was documented as "Always called with p->lock held" —
// presumably that contract still applies; confirm at the call sites.
void
addrun(struct proc* p)
{
  p->set_state(RUNNABLE);
  schedule_[p->cpuid].enq(p);
}
void
sched
(
void
)
{
extern
void
threadstub
(
void
);
extern
void
forkret
(
void
);
extern
void
idleheir
(
void
*
x
);
int
intena
;
proc
*
prev
;
proc
*
next
;
#if SPINLOCK_DEBUG
if
(
!
holding
(
&
myproc
()
->
lock
))
...
...
@@ -53,6 +230,7 @@ sched(void)
extern
void
idlebequeath
(
void
);
idlebequeath
();
}
if
(
mycpu
()
->
ncli
!=
1
)
panic
(
"sched locks"
);
if
(
myproc
()
->
get_state
()
==
RUNNING
)
...
...
@@ -62,7 +240,9 @@ sched(void)
intena
=
mycpu
()
->
intena
;
myproc
()
->
curcycles
+=
rdtsc
()
-
myproc
()
->
tsc
;
struct
proc
*
next
=
schednext
();
// Interrupts are disabled
next
=
schedule_
->
deq
();
if
(
next
==
nullptr
)
{
if
(
myproc
()
->
get_state
()
!=
RUNNABLE
||
// proc changed its CPU pin?
...
...
@@ -79,7 +259,7 @@ sched(void)
if
(
next
->
get_state
()
!=
RUNNABLE
)
panic
(
"non-RUNNABLE next %s %u"
,
next
->
name
,
next
->
get_state
());
struct
proc
*
prev
=
myproc
();
prev
=
myproc
();
mycpu
()
->
proc
=
next
;
mycpu
()
->
prev
=
prev
;
...
...
@@ -107,158 +287,8 @@ sched(void)
}
// Mark p RUNNABLE and push it onto the head of its assigned CPU's
// run queue.
void
addrun(struct proc* p)
{
  // Always called with p->lock held
  struct runq* q;

  p->set_state(RUNNABLE);

  q = &runq[p->cpuid];
  acquire(&q->lock);
  STAILQ_INSERT_HEAD(&q->q, p, runqlink);
  p->runq = q;   // remember which queue p is on, for delrun()
  q->len++;
  release(&q->lock);
}
// Remove p from the run queue it was placed on by addrun().
void
delrun(struct proc* p)
{
  // Always called with p->lock held
  struct runq* q;

  q = p->runq;
  acquire(&q->lock);
  STAILQ_REMOVE(&q->q, p, proc, runqlink);
  q->len--;
  release(&q->lock);
}
// Try to steal one runnable proc from another CPU's run queue and move
// it onto ours.  Returns 1 on success, 0 otherwise.  Interrupts are
// disabled for the duration (pushcli/popcli).
int
steal(void)
{
  struct proc* steal;
  int r = 0;

  pushcli();
  // First pass restricts victims to procs in exec; when steal_nonexec
  // is set a second pass considers everything else too.
  for (int nonexec = 0; nonexec < (steal_nonexec ? 2 : 1); nonexec++) {
    for (int i = 1; i < ncpu; i++) {
      // Offset by our CPU id so we scan every queue but our own.
      struct runq* q = &runq[(i + mycpu()->id) % ncpu];
      struct proc* p;

      if (q->len == 0)
        continue;

      // XXX(sbw) Look for a process to steal.  Acquiring q->lock
      // then p->lock can result in deadlock.  So we acquire
      // q->lock, scan for a process, drop q->lock, acquire p->lock,
      // and then check that it's still ok to steal p.
      steal = nullptr;
      if (tryacquire(&q->lock) == 0)
        continue;
      STAILQ_FOREACH(p, &q->q, runqlink) {
        if (p->get_state() == RUNNABLE && !p->cpu_pin &&
            (p->in_exec_ || nonexec) &&
            p->curcycles != 0 && p->curcycles > VICTIMAGE)
        {
          STAILQ_REMOVE(&q->q, p, proc, runqlink);
          q->len--;
          steal = p;
          break;
        }
      }
      release(&q->lock);

      if (steal) {
        acquire(&steal->lock);
        // Re-check under p->lock: the state may have changed since we
        // dropped q->lock.
        if (steal->get_state() == RUNNABLE && !steal->cpu_pin &&
            steal->curcycles != 0 && steal->curcycles > VICTIMAGE)
        {
          steal->curcycles = 0;
          steal->cpuid = mycpu()->id;
          addrun(steal);
          release(&steal->lock);
          r = 1;
          goto found;
        }
        // Couldn't take it after all: requeue it if still runnable
        // (it was already unlinked above).
        if (steal->get_state() == RUNNABLE)
          addrun(steal);
        release(&steal->lock);
      }
    }
  }

found:
  popcli();
  return r;
}
// Pop the next proc to run from this CPU's run queue, or nullptr if it
// is empty.  Takes from the tail while addrun() pushes at the head, so
// procs are dispensed in FIFO order.
struct proc*
schednext(void)
{
  // No locks, interrupts enabled
  struct runq* q;
  struct proc* p = nullptr;

  pushcli();
  q = &runq[mycpu()->id];
  acquire(&q->lock);
  p = STAILQ_LAST(&q->q, proc, runqlink);
  if (p) {
    STAILQ_REMOVE(&q->q, p, proc, runqlink);
    q->len--;
  }
  release(&q->lock);
  popcli();
  return p;
}
// Initialize every per-CPU run queue: its lock, an empty STAILQ, and a
// zero length.
void
initsched(void)
{
  int i;

  for (i = 0; i < NCPU; i++) {
    initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
    STAILQ_INIT(&runq[i].q);
    runq[i].len = 0;
  }
}
#if 0
static int
migrate(struct proc *p)
{
// p should not be running, or be on a runqueue, or be myproc()
int c;
if (p == myproc())
panic("migrate: myproc");
for (c = 0; c < ncpu; c++) {
if (c == mycpu()->id)
continue;
if (idle[c]) { // OK if there is a race
acquire(&p->lock);
if (p->state == RUNNING)
panic("migrate: pid %u name %s is running",
p->pid, p->name);
if (p->cpu_pin)
panic("migrate: pid %u name %s is pinned",
p->pid, p->name);
p->curcycles = 0;
p->cpuid = c;
addrun(p);
idle[c] = 0;
release(&p->lock);
return 0;
}
}
return -1;
for
(
int
i
=
0
;
i
<
NCPU
;
i
++
)
new
(
&
schedule_
[
i
])
schedule
();
}
#endif
编写
预览
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论