xv6-public: commit a149ec86

checkpoint new sched code

Author:  Silas Boyd-Wickizer
Date:    Feb 26, 2012
Parent:  472e7a01

Showing 8 changed files with 124 additions and 98 deletions:
  include/cpu.hh       +2    -0
  include/kernel.hh    +5    -1
  kernel/condvar.cc    +1    -1
  kernel/console.cc    +1    -0
  kernel/hwvm.cc       +5    -3
  kernel/idle.cc       +87   -59
  kernel/proc.cc       +4    -33
  kernel/sched.cc      +19   -1
include/cpu.hh
@@ -20,6 +20,8 @@ struct cpu {
   int timer_printpc;
   atomic<u64> tlbflush_done;   // last tlb flush req done on this cpu
+  struct proc *prev;
+
   // Cpu-local storage variables; see below
   struct cpu *cpu;
   struct proc *proc;           // The currently-running process.
include/kernel.hh
@@ -184,13 +184,17 @@ int kill(int);
 void            pinit(void);
 void            procdumpall(void);
 void            scheduler(void) __noret__;
-void            sched(void);
 void            userinit(void);
 int             wait(void);
 void            yield(void);
 struct proc*    threadalloc(void (*fn)(void*), void *arg);
 void            threadpin(void (*fn)(void*), void *arg, const char *name, int cpu);
+
+// XXX
+void            sched(void);
+void            post_swtch(void);
+void            scheddump(void);
 
 // prof.c
 extern int      profenable;
 void            profreset(void);
kernel/condvar.cc
@@ -94,7 +94,7 @@ void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
   release(&cv->lock);
   sched();
-  sti();
+  // sti();
 
   // Reacquire original lock.
   acquire(lk);
kernel/console.cc
@@ -245,6 +245,7 @@ consoleintr(int (*getc)(void))
   switch(c){
   case C('P'):  // Process listing.
     procdumpall();
+    scheddump();
     break;
   case C('E'):  // Print user-space PCs.
     for (u32 i = 0; i < NCPU; i++)
kernel/hwvm.cc
@@ -124,9 +124,11 @@ switchuvm(struct proc *p)
   mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
   mycpu()->ts.iomba = (u16) offsetof(struct taskstate, iopb);
   ltr(TSSSEG);
-  if (p->vmap == 0 || p->vmap->pml4 == 0)
-    panic("switchuvm: no vmap/pml4");
-  lcr3(v2p(p->vmap->pml4));  // switch to new address space
+  if (p->vmap != 0 && p->vmap->pml4 != 0)
+    lcr3(v2p(p->vmap->pml4));  // switch to new address space
+  else
+    switchkvm();
   popcli();
 }
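The switchuvm() change reads as an enabling fix rather than a cleanup: a proc with no user address space used to be a panic, but once the new sched() in kernel/idle.cc below calls switchuvm(next) for every switch target, including the per-cpu idle procs, a kernel-only proc is presumably a legitimate argument and should simply stay on the kernel page table. A sketch of the resulting logic, re-read from the hunk above with the diff markers and surrounding context removed (not a verbatim copy of the file):

// Tail of switchuvm() after this commit, as it appears to read:
  if (p->vmap != 0 && p->vmap->pml4 != 0)
    lcr3(v2p(p->vmap->pml4));   // user proc: load its page table
  else
    switchkvm();                // kernel-only proc (e.g. an idle proc): keep the kernel page table
  popcli();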
kernel/idle.cc
@@ -14,76 +14,105 @@
 #include "vm.hh"
 #include "ns.hh"
 
-static int __mpalign__ idle[NCPU];
 static struct proc *the_idle[NCPU] __mpalign__;
 
+extern void forkret(void);
+
 void
-idleloop(void)
+post_swtch(void)
 {
-  extern void forkret(void);
-  struct proc *idlep = the_idle[cpunum()];
+  if (get_proc_state(mycpu()->prev) == RUNNABLE &&
+      mycpu()->prev != the_idle[mycpu()->id])
+    addrun(mycpu()->prev);
+  release(&mycpu()->prev->lock);
+  popcli();
+}
+
+void
+sched(void)
+{
+  int intena;
+
+#if SPINLOCK_DEBUG
+  if(!holding(&myproc()->lock))
+    panic("sched proc->lock");
+#endif
+  if(mycpu()->ncli != 1)
+    panic("sched locks");
+  if(get_proc_state(myproc()) == RUNNING)
+    panic("sched running");
+  if(readrflags()&FL_IF)
+    panic("sched interruptible");
+  intena = mycpu()->intena;
+  myproc()->curcycles += rdtsc() - myproc()->tsc;
+  if (get_proc_state(myproc()) == ZOMBIE)
+    mtstop(myproc());
+  else
+    mtpause(myproc());
+  mtign();
+
+  struct proc *next = schednext();
+  if (next) {
+ switchit:
+    pushcli();
+    if (get_proc_state(next) != RUNNABLE)
+      panic("non-RUNNABLE next %s %u", next->name, get_proc_state(next));
+
+    // Switch to chosen process.  It is the process's job
+    // to release proc->lock and then reacquire it
+    // before jumping back to us.
+    struct proc *prev = myproc();
+    mycpu()->proc = next;
+    mycpu()->prev = prev;
+    switchuvm(next);
+    set_proc_state(next, RUNNING);
+    next->tsc = rdtsc();
+
+    mtpause(next);
+    if (next->context->rip != (uptr)forkret &&
+        next->context->rip != (uptr)threadstub)
+    {
+      mtresume(next);
+    }
+    mtrec();
+
+    swtch(&prev->context, next->context);
+    mycpu()->intena = intena;
+    post_swtch();
+  } else if (get_proc_state(myproc()) != RUNNABLE) {
+    next = the_idle[mycpu()->id];
+    goto switchit;
+  } else {
+    set_proc_state(myproc(), RUNNING);
+    mycpu()->intena = intena;
+    release(&myproc()->lock);
+  }
+  //swtch(&myproc()->context, mycpu()->scheduler);
+  //mycpu()->intena = intena;
+}
+
+void
+idleloop(void)
+{
   // Test the work queue
   //extern void testwq(void);
   //testwq();
 
   // Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
   // mtrace_call_set(1, cpu->id);
-  mtstart(scheduler, idlep);
-
-  for (;;){
-    // Enable interrupts on this processor.
-    sti();
-
-    struct proc *p = schednext();
-    if (p) {
-      cli();
-      //acquire(&p->lock);
-      if (get_proc_state(p) != RUNNABLE) {
-        panic("Huh?");
-      } else {
-        if (idle[mycpu()->id])
-          idle[mycpu()->id] = 0;
-
-        // Switch to chosen process.  It is the process's job
-        // to release proc->lock and then reacquire it
-        // before jumping back to us.
-        mycpu()->proc = p;
-        switchuvm(p);
-        set_proc_state(p, RUNNING);
-        p->tsc = rdtsc();
-
-        mtpause(idlep);
-        if (p->context->rip != (uptr)forkret &&
-            p->context->rip != (uptr)threadstub)
-        {
-          mtresume(p);
-        }
-        mtrec();
-
-        swtch(&mycpu()->scheduler, myproc()->context);
-        mtresume(idlep);
-        mtign();
-
-        switchkvm();
-
-        // Process is done running for now.
-        // It should have changed its p->state before coming back.
-        mycpu()->proc = idlep;
-        if (get_proc_state(p) == RUNNABLE)
-          addrun(p);
-        release(&p->lock);
-      }
-    } else {
-      if (steal()) {
-        if (idle[mycpu()->id])
-          idle[mycpu()->id] = 0;
-      } else {
-        if (!idle[mycpu()->id])
-          idle[mycpu()->id] = 1;
-      }
-    }
-
-    if (idle[mycpu()->id]) {
+  //mtstart(scheduler, idlep);
+
+  sti();
+  for (;;) {
+    acquire(&myproc()->lock);
+    set_proc_state(myproc(), RUNNABLE);
+    sched();
+
+    if (steal() == 0) {
       int worked;
       do {
         assert(mycpu()->ncli == 0);

@@ -106,5 +135,4 @@ initidle(void)
   mycpu()->proc = p;
   myproc()->cpu_pin = 1;
   the_idle[cpunum()] = p;
-  idle[cpunum()] = 1;
 }
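Read together with the kernel/proc.cc hunks below, the new control-transfer protocol appears to be: a thread that gives up the CPU holds its own proc lock, updates its state, and calls sched(); sched() picks the next runnable proc (falling back to the per-cpu idle proc), records the outgoing proc in mycpu()->prev, and swtch()es directly into the target's saved context; whatever runs next on that CPU, either the tail of the target's own earlier sched() call or forkret()/threadhelper() for a brand-new thread, calls post_swtch() to requeue and unlock the previous proc. A minimal caller-side sketch of that protocol, using only names that appear in this commit (the function name yield_sketch is illustrative, not part of the tree):

// Sketch of the yield path under the new scheme, pieced together from
// yield() in kernel/proc.cc and sched()/post_swtch() above.
static void
yield_sketch(void)
{
  acquire(&myproc()->lock);             // sched() asserts the caller holds its own proc lock
  set_proc_state(myproc(), RUNNABLE);   // still runnable, just giving up the CPU
  sched();                              // switches straight to the next proc; on the far side,
                                        // that proc's sched() tail (or forkret()/threadhelper()
                                        // for a new thread) calls post_swtch(), which re-queues
                                        // us via mycpu()->prev and releases our lock
  // Execution resumes here once a later sched() picks us again: we come back
  // just after the swtch() inside our own sched() call, and the post_swtch()
  // that follows it cleans up whichever proc switched to us.
}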
kernel/proc.cc
@@ -30,33 +30,6 @@ struct kstack_tag kstack_tag[NCPU];
 enum { sched_debug = 0 };
 
-void
-sched(void)
-{
-  int intena;
-
-#if SPINLOCK_DEBUG
-  if(!holding(&myproc()->lock))
-    panic("sched proc->lock");
-#endif
-  if(mycpu()->ncli != 1)
-    panic("sched locks");
-  if(get_proc_state(myproc()) == RUNNING)
-    panic("sched running");
-  if(readrflags()&FL_IF)
-    panic("sched interruptible");
-  intena = mycpu()->intena;
-  myproc()->curcycles += rdtsc() - myproc()->tsc;
-  if (get_proc_state(myproc()) == ZOMBIE)
-    mtstop(myproc());
-  else
-    mtpause(myproc());
-  mtign();
-  swtch(&myproc()->context, mycpu()->scheduler);
-  mycpu()->intena = intena;
-}
-
 // Give up the CPU for one scheduling round.
 void
 yield(void)

@@ -64,7 +37,7 @@ yield(void)
   acquire(&myproc()->lock);  //DOC: yieldlock
   set_proc_state(myproc(), RUNNABLE);
   sched();
-  sti();
+  // sti();
   //release(&myproc()->lock);
 }

@@ -74,8 +47,8 @@ yield(void)
 void
 forkret(void)
 {
-  sti();
-  //release(&myproc()->lock);
+  post_swtch();
+
   // Just for the first process. can't do it earlier
   // b/c file system code needs a process context
   // in which to call cv_sleep().

@@ -351,7 +324,6 @@ void
 procdumpall(void)
 {
   static const char *states[] = {
-  /* [UNUSED] = */ "unused",
   /* [EMBRYO] = */ "embryo",
   /* [SLEEPING] = */ "sleep ",
   /* [RUNNABLE] = */ "runble",

@@ -500,8 +472,8 @@ wait(void)
 void
 threadhelper(void (*fn)(void*), void *arg)
 {
-  sti();
-  //release(&myproc()->lock);
+  post_swtch();
+
   mtstart(fn, myproc());
   fn(arg);
   exit();
kernel/sched.cc
@@ -128,11 +128,29 @@ initsched(void)
   int i;
 
   for (i = 0; i < NCPU; i++) {
     initlock(&runq[i].lock, "runq", LOCKSTAT_SCHED);
     STAILQ_INIT(&runq[i].q);
   }
 }
 
+void
+scheddump(void)
+{
+  struct proc *p;
+  int i;
+
+  for (i = 0; i < NCPU; i++) {
+    struct runq *q = &runq[i];
+    cprintf("%u\n", i);
+
+    acquire(&q->lock);
+    STAILQ_FOREACH(p, &q->q, runqlink) {
+      cprintf(" %s\n", p->name);
+    }
+    release(&q->lock);
+  }
+}
+
 #if 0
 static int
 migrate(struct proc *p)
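scheddump() walks each per-CPU run queue under its lock and prints the queue index followed by the names of the queued procs; the kernel/console.cc hunk above wires it into the existing Ctrl-P handler right after procdumpall(). Going only by the two cprintf format strings in the new function, the extra console output for a hypothetical two-CPU run with sh and cat queued on CPU 0 and nothing on CPU 1 would look roughly like (made-up example, not captured output):

0
 sh
 cat
1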