xv6-public

Commit 65bd8e13
Authored Jul 16, 2006 by rsc

New scheduler.
Removed cli and sti stack in favor of tracking number of locks held on each CPU and explicit conditionals in spinlock.c.

Parent commit: 40a2a083

Showing 11 changed files with 206 additions and 187 deletions

console.c     +1   -1
defs.h        +0   -3
dot-bochsrc   +1   -1
main.c        +5   -5
proc.c        +156 -141
proc.h        +2   -3
spinlock.c    +14  -20
syscall.c     +2   -1
trap.c        +2   -9
trapasm.S     +9   -3
x86.h         +14  -0
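
The heart of the change, in one sketch: the old kernel kept a per-CPU cli()/sti() nesting counter (cpus[cpu()].clis) in proc.c; the new kernel counts held spinlocks in cpus[cpu()].nlock and lets acquire1/release1 toggle interrupts only on the 0-to-1 and 1-to-0 transitions. A minimal user-space model of the new rule (cli_hw/sti_hw are illustrative stand-ins for the cli/sti instructions, not kernel symbols):

#include <assert.h>

static int nlock;          /* models cpus[cpu()].nlock */
static int if_enabled = 1; /* models the IF flag in %eflags */

static void cli_hw(void) { if_enabled = 0; } /* stand-in for cli */
static void sti_hw(void) { if_enabled = 1; } /* stand-in for sti */

/* Interrupts go off when the first lock is taken... */
static void acquire(void)
{
  if (nlock++ == 0)
    cli_hw();
  /* ...spin for the lock here... */
}

/* ...and come back on only when the last lock is dropped. */
static void release(void)
{
  /* ...clear the lock here... */
  if (--nlock == 0)
    sti_hw();
}

int main(void)
{
  acquire();           /* first lock: interrupts off */
  acquire();           /* nested lock: no change */
  release();
  assert(!if_enabled); /* still holding one lock */
  release();
  assert(if_enabled);  /* last lock released: interrupts on */
  return 0;
}

This is the invariant the new assertions in trap.c and scheduler() check: interrupts are enabled exactly when cpus[cpu()].nlock == 0.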

console.c
@@ -113,7 +113,7 @@ void
 cprintf(char *fmt, ...)
 {
   int i, state = 0, c;
-  unsigned int *ap = (unsigned int *) &fmt + 1;
+  unsigned int *ap = (unsigned int *)(void*)&fmt + 1;
 
   if(use_console_lock)
     acquire(&console_lock);

defs.h
@@ -13,7 +13,6 @@ struct proc;
 struct jmpbuf;
 void setupsegs(struct proc *);
 struct proc * newproc(void);
-void swtch(int);
 struct spinlock;
 void sleep(void *, struct spinlock *);
 void wakeup(void *);
@@ -22,8 +21,6 @@ void proc_exit(void);
 int proc_kill(int);
 int proc_wait(void);
 void yield(void);
-void cli(void);
-void sti(void);
 
 // swtch.S
 struct jmpbuf;

dot-bochsrc
@@ -107,7 +107,7 @@ romimage: file=$BXSHARE/BIOS-bochs-latest, address=0xf0000
 # 650Mhz Athlon K-7 with Linux 2.4.4/egcs-2.91.66 2 to 2.5 Mips
 # 400Mhz Pentium II with Linux 2.0.36/egcs-1.0.3 1 to 1.8 Mips
 #=======================================================================
-cpu: count=2, ips=10000000
+cpu: count=2, ips=10000000, reset_on_triple_fault=0
 
 #=======================================================================
 # MEGS

main.c
@@ -18,19 +18,19 @@ extern uint8_t _binary_userfs_start[], _binary_userfs_size[];
 extern int use_console_lock;
 
+struct spinlock sillylock; // hold this to keep interrupts disabled
+
 int
 main()
 {
   struct proc *p;
 
   if(acpu){
-    cpus[cpu()].clis = 1;
     cprintf("an application processor\n");
     idtinit(); // CPU's idt
     lapic_init(cpu());
     lapic_timerinit();
     lapic_enableintr();
-    sti();
     scheduler();
   }
   acpu = 1;
@@ -40,10 +40,9 @@ main()
   mp_init(); // collect info about this machine
 
+  acquire(&sillylock);
   use_console_lock = 1;
-  cpus[cpu()].clis = 1; // cpu starts as if we had called cli()
 
   lapic_init(mp_bcpu());
   cprintf("\nxV6\n\n");
@@ -56,7 +55,7 @@ main()
   // create fake process zero
   p = &proc[0];
   memset(p, 0, sizeof *p);
-  p->state = WAITING;
+  p->state = SLEEPING;
   p->sz = 4 * PAGE;
   p->mem = kalloc(p->sz);
   memset(p->mem, 0, p->sz);
@@ -88,6 +87,7 @@ main()
   //load_icode(p, _binary_userfs_start, (unsigned) _binary_userfs_size);
   p->state = RUNNABLE;
   cprintf("loaded userfs\n");
+  release(&sillylock);
 
   scheduler();

proc.c
@@ -12,6 +12,7 @@ struct spinlock proc_table_lock;
 struct proc proc[NPROC];
 struct proc *curproc[NCPU];
 int next_pid = 1;
+extern void forkret(void);
 
 /*
  * set up a process's task state and segment descriptors
@@ -96,12 +97,14 @@ newproc()
   *(np->tf) = *(op->tf);
   np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child
 
-  // set up new jmpbuf to start executing at trapret with esp pointing at tf
+  // Set up new jmpbuf to start executing forkret (see trapasm.S)
+  // with esp pointing at tf. Forkret will call forkret1 (below) to release
+  // the proc_table_lock and then jump into the usual trap return code.
   memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
-  np->jmpbuf.jb_eip = (unsigned) trapret;
+  np->jmpbuf.jb_eip = (unsigned) forkret;
   np->jmpbuf.jb_esp = (unsigned) np->tf - 4; // -4 for the %eip that isn't actually there
 
-  // copy file descriptors
+  // Copy file descriptors
   for(fd = 0; fd < NOFILE; fd++){
     np->fds[fd] = op->fds[fd];
     if(np->fds[fd])
@@ -112,127 +115,152 @@ newproc()
 }
 
 void
+forkret1(void)
+{
+  release(&proc_table_lock);
+}
+
+// Per-CPU process scheduler.
+// Each CPU calls scheduler() after setting itself up.
+// Scheduler never returns.  It loops, doing:
+//  - choose a process to run
+//  - longjmp to start running that process
+//  - eventually that process transfers control back
+//      via longjmp back to the top of scheduler.
+void
 scheduler(void)
 {
-  struct proc *op, *np;
+  struct proc *p;
   int i;
 
   cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
   cpus[cpu()].lastproc = &proc[0];
 
-  setjmp(&cpus[cpu()].jmpbuf);
-
-  op = curproc[cpu()];
-  if(op == 0 || op->mtx != &proc_table_lock)
-    acquire1(&proc_table_lock, op);
-  if(op){
-    if(op->newstate <= 0 || op->newstate > ZOMBIE)
-      panic("scheduler");
-    op->state = op->newstate;
-    op->newstate = -1;
-    if(op->mtx){
-      struct spinlock *mtx = op->mtx;
-      op->mtx = 0;
-      if(mtx != &proc_table_lock)
-        release1(mtx, op);
-    }
-  }
-
-  // find a runnable process and switch to it
-  curproc[cpu()] = 0;
-  np = cpus[cpu()].lastproc + 1;
-  while(1){
+  for(;;){
+    // Loop over process table looking for process to run.
+    acquire(&proc_table_lock);
     for(i = 0; i < NPROC; i++){
-      if(np >= &proc[NPROC])
-        np = &proc[0];
-      if(np->state == RUNNABLE)
-        break;
-      np++;
-    }
-    if(i < NPROC){
-      np->state = RUNNING;
-      release1(&proc_table_lock, op);
-      break;
-    }
-    release1(&proc_table_lock, op);
-    op = 0;
-    acquire(&proc_table_lock);
-    np = &proc[0];
-  }
-
-  cpus[cpu()].lastproc = np;
-  curproc[cpu()] = np;
-
-  // h/w sets busy bit in TSS descriptor sometimes, and faults
-  // if it's set in LTR. so clear tss descriptor busy bit.
-  np->gdt[SEG_TSS].sd_type = STS_T32A;
-
-  // XXX should probably have an lgdt() function in x86.h
-  // to confine all the inline assembly.
-  // XXX probably ought to lgdt on trap return too, in case
-  // a system call has moved a program or changed its size.
-  asm volatile("lgdt %0" : : "g" (np->gdt_pd.pd_lim));
-  ltr(SEG_TSS << 3);
-
-  if(0) cprintf("cpu%d: run %d esp=%p callerpc=%p\n", cpu(), np-proc);
-  longjmp(&np->jmpbuf);
+      p = &proc[i];
+      if(p->state != RUNNABLE)
+        continue;
+
+      // Run this process.
+      // XXX move this into swtch or trapret or something.
+      // It can run on the other stack.
+
+      // h/w sets busy bit in TSS descriptor sometimes, and faults
+      // if it's set in LTR. so clear tss descriptor busy bit.
+      p->gdt[SEG_TSS].sd_type = STS_T32A;
+
+      // XXX should probably have an lgdt() function in x86.h
+      // to confine all the inline assembly.
+      // XXX probably ought to lgdt on trap return too, in case
+      // a system call has moved a program or changed its size.
+      asm volatile("lgdt %0" : : "g" (p->gdt_pd.pd_lim));
+      ltr(SEG_TSS << 3);
+
+      // Switch to chosen process.  It is the process's job
+      // to release proc_table_lock and then reacquire it
+      // before jumping back to us.
+      if(0) cprintf("cpu%d: run %d\n", cpu(), p-proc);
+      curproc[cpu()] = p;
+      p->state = RUNNING;
+      if(setjmp(&cpus[cpu()].jmpbuf) == 0)
+        longjmp(&p->jmpbuf);
+
+      // Process is done running for now.
+      // It should have changed its p->state before coming back.
+      curproc[cpu()] = 0;
+      if(p->state == RUNNING)
+        panic("swtch to scheduler with state=RUNNING");
+
+      // XXX if not holding proc_table_lock panic.
+    }
+    release(&proc_table_lock);
+
+    if(cpus[cpu()].nlock != 0)
+      panic("holding locks in scheduler");
+
+    // With proc_table_lock released, there are no
+    // locks held on this cpu, so interrupts are enabled.
+    // Hardware interrupts can happen here.
+    // Also, releasing the lock here lets the other CPUs
+    // look for runnable processes too.
+  }
 }
 
-// give up the cpu by switching to the scheduler,
-// which runs on the per-cpu stack.
+// Enter scheduler.  Must already hold proc_table_lock
+// and have changed curproc[cpu()]->state.
 void
-swtch(int newstate)
+sched(void)
 {
-  struct proc *p = curproc[cpu()];
-
-  if(p == 0)
-    panic("swtch no proc");
-  if(p->mtx == 0 && p->locks != 0)
-    panic("swtch w/ locks");
-  if(p->mtx && p->locks != 1)
-    panic("swtch w/ locks 1");
-  if(p->mtx && p->mtx->locked == 0)
-    panic("switch w/ lock but not held");
-  if(p->locks && (read_eflags() & FL_IF))
-    panic("swtch w/ lock but FL_IF");
-
-  p->newstate = newstate; // basically an argument to scheduler()
-  if(setjmp(&p->jmpbuf) == 0)
-    longjmp(&cpus[cpu()].jmpbuf);
+  if(setjmp(&curproc[cpu()]->jmpbuf) == 0)
+    longjmp(&cpus[cpu()].jmpbuf);
 }
 
+// Give up the CPU for one scheduling round.
+void
+yield()
+{
+  struct proc *p;
+
+  if((p=curproc[cpu()]) == 0 || curproc[cpu()]->state != RUNNING)
+    panic("yield");
+  acquire(&proc_table_lock);
+  p->state = RUNNABLE;
+  sched();
+  release(&proc_table_lock);
+}
+
+// Atomically release lock and sleep on chan.
+// Reacquires lock when reawakened.
 void
-sleep(void *chan, struct spinlock *mtx)
+sleep(void *chan, struct spinlock *lk)
 {
   struct proc *p = curproc[cpu()];
 
   if(p == 0)
     panic("sleep");
 
-  p->chan = chan;
-  p->mtx = mtx; // scheduler will release it
-
-  swtch(WAITING);
-
-  if(mtx)
-    acquire(mtx);
-  p->chan = 0;
+  // Must acquire proc_table_lock in order to
+  // change p->state and then call sched.
+  // Once we hold proc_table_lock, we can be
+  // guaranteed that we won't miss any wakeup
+  // (wakeup runs with proc_table_lock locked),
+  // so it's okay to release lk.
+  if(lk != &proc_table_lock){
+    acquire(&proc_table_lock);
+    release(lk);
+  }
+
+  // Go to sleep.
+  p->chan = chan;
+  p->state = SLEEPING;
+  sched();
+
+  // Tidy up.
+  p->chan = 0;
+
+  // Reacquire original lock.
+  if(lk != &proc_table_lock){
+    release(&proc_table_lock);
+    acquire(lk);
+  }
 }
 
+// Wake up all processes sleeping on chan.
+// Proc_table_lock must be held.
 void
 wakeup1(void *chan)
 {
   struct proc *p;
 
   for(p = proc; p < &proc[NPROC]; p++)
-    if(p->state == WAITING && p->chan == chan)
+    if(p->state == SLEEPING && p->chan == chan)
       p->state = RUNNABLE;
 }
 
+// Wake up all processes sleeping on chan.
+// Proc_table_lock is acquired and released.
 void
 wakeup(void *chan)
 {
@@ -241,15 +269,32 @@ wakeup(void *chan)
   release(&proc_table_lock);
 }
 
-// give up the CPU but stay marked as RUNNABLE
-void
-yield()
+// Kill the process with the given pid.
+// Process won't actually exit until it returns
+// to user space (see trap in trap.c).
+int
+proc_kill(int pid)
 {
-  if(curproc[cpu()] == 0 || curproc[cpu()]->state != RUNNING)
-    panic("yield");
-  swtch(RUNNABLE);
+  struct proc *p;
+
+  acquire(&proc_table_lock);
+  for(p = proc; p < &proc[NPROC]; p++){
+    if(p->pid == pid){
+      p->killed = 1;
+      // Wake process from sleep if necessary.
+      if(p->state == SLEEPING)
+        p->state = RUNNABLE;
+      release(&proc_table_lock);
+      return 0;
+    }
+  }
+  release(&proc_table_lock);
+  return -1;
 }
 
+// Exit the current process.  Does not return.
+// Exited processes remain in the zombie state
+// until their parent calls wait() to find out they exited.
 void
 proc_exit()
 {
@@ -257,6 +302,7 @@ proc_exit()
   struct proc *cp = curproc[cpu()];
   int fd;
 
+  // Close all open files.
   for(fd = 0; fd < NOFILE; fd++){
     if(cp->fds[fd]){
       fd_close(cp->fds[fd]);
@@ -266,91 +312,60 @@ proc_exit()
   acquire(&proc_table_lock);
 
-  // wake up parent
+  // Wake up our parent.
   for(p = proc; p < &proc[NPROC]; p++)
     if(p->pid == cp->ppid)
       wakeup1(p);
 
-  // abandon children
+  // Reparent our children to process 1.
   for(p = proc; p < &proc[NPROC]; p++)
     if(p->ppid == cp->pid)
-      p->pid = 1;
+      p->ppid = 1;
 
-  cp->mtx = &proc_table_lock;
-  swtch(ZOMBIE);
-  panic("a zombie revived");
+  // Jump into the scheduler, never to return.
+  cp->state = ZOMBIE;
+  sched();
+  panic("zombie exit");
 }
 
+// Wait for a child process to exit and return its pid.
+// Return -1 if this process has no children.
 int
 proc_wait(void)
 {
   struct proc *p;
   struct proc *cp = curproc[cpu()];
-  int any, pid;
+  int i, havekids, pid;
 
   acquire(&proc_table_lock);
-  while(1){
-    any = 0;
-    for(p = proc; p < &proc[NPROC]; p++){
-      if(p->state == ZOMBIE && p->ppid == cp->pid){
-        kfree(p->mem, p->sz);
-        kfree(p->kstack, KSTACKSIZE);
-        pid = p->pid;
-        p->state = UNUSED;
-        release(&proc_table_lock);
-        return pid;
-      }
-      if(p->state != UNUSED && p->ppid == cp->pid)
-        any = 1;
-    }
-    if(any == 0){
-      release(&proc_table_lock);
-      return -1;
-    }
-    sleep(cp, &proc_table_lock);
-  }
-}
-
-int
-proc_kill(int pid)
-{
-  struct proc *p;
-
-  acquire(&proc_table_lock);
-  for(p = proc; p < &proc[NPROC]; p++){
-    if(p->pid == pid && p->state != UNUSED){
-      p->killed = 1;
-      if(p->state == WAITING)
-        p->state = RUNNABLE;
-      release(&proc_table_lock);
-      return 0;
-    }
-  }
-  release(&proc_table_lock);
-  return -1;
-}
-
-// disable interrupts
-void
-cli(void)
-{
-  if(cpus[cpu()].clis == 0)
-    __asm __volatile("cli");
-  cpus[cpu()].clis += 1;
-  if((read_eflags() & FL_IF) != 0)
-    panic("cli but enabled");
-}
-
-// enable interrupts
-void
-sti(void)
-{
-  if((read_eflags() & FL_IF) != 0)
-    panic("sti but enabled");
-  if(cpus[cpu()].clis < 1)
-    panic("sti");
-  cpus[cpu()].clis -= 1;
-  if(cpus[cpu()].clis < 1)
-    __asm __volatile("sti");
-}
+  for(;;){
+    // Scan through table looking for zombie children.
+    havekids = 0;
+    for(i = 0; i < NPROC; i++){
+      p = &proc[i];
+      if(p->ppid == cp->pid){
+        if(p->state == ZOMBIE){
+          // Found one.
+          kfree(p->mem, p->sz);
+          kfree(p->kstack, KSTACKSIZE);
+          pid = p->pid;
+          p->state = UNUSED;
+          p->pid = 0;
+          release(&proc_table_lock);
+          return pid;
+        }
+        havekids = 1;
+      }
+    }
+
+    // No point waiting if we don't have any children.
+    if(!havekids){
+      release(&proc_table_lock);
+      return -1;
+    }
+
+    // Wait for children to exit.  (See wakeup1 call in proc_exit.)
+    sleep(cp, &proc_table_lock);
+  }
}
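
The comment block in the new sleep() carries the key reasoning: taking proc_table_lock before releasing the caller's lock closes the window in which a wakeup() could slip in between "check condition" and "go to sleep". A usage sketch of the new interface (the ticks counter, tick_lock, and both functions are hypothetical, not part of this commit):

// Illustrative consumer/producer pair for the new sleep/wakeup protocol.
struct spinlock tick_lock;
int ticks;

void
wait_for_next_tick(void)
{
  int t;

  acquire(&tick_lock);
  t = ticks;
  while(ticks == t)             // condition still false, so sleep;
    sleep(&ticks, &tick_lock);  // sleep() trades tick_lock for proc_table_lock
  release(&tick_lock);
}

void
tick(void)
{
  acquire(&tick_lock);
  ticks++;
  wakeup(&ticks);  // wakeup() itself takes proc_table_lock
  release(&tick_lock);
}

Because wakeup() runs with proc_table_lock held, a tick() on another CPU either happens before the sleeper commits (and the while loop sees the new count) or after the sleeper is marked SLEEPING (and wakeup1 finds it); the wakeup cannot be lost.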

proc.h
@@ -33,7 +33,7 @@ struct jmpbuf {
   int jb_eip;
 };
 
-enum proc_state { UNUSED, EMBRYO, WAITING, RUNNABLE, RUNNING, ZOMBIE };
+enum proc_state { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
 
 struct proc{
   char *mem; // start of process's physical memory
@@ -46,7 +46,6 @@ struct proc{
   int ppid;
   void *chan; // sleep
   int killed;
-  int locks; // # of locks currently held
   struct fd *fds[NOFILE];
 
   struct Taskstate ts; // only to give cpu address of kernel stack
@@ -71,7 +70,7 @@ struct cpu {
   struct jmpbuf jmpbuf;
   char mpstack[MPSTACK]; // per-cpu start-up stack, only used to get into main()
   struct proc *lastproc; // last proc scheduled on this cpu (never NULL)
-  int clis; // cli() nesting depth
+  int nlock; // # of locks currently held
 };
 
 extern struct cpu cpus[NCPU];

spinlock.c
@@ -6,42 +6,35 @@
 #include "proc.h"
 #include "spinlock.h"
 
-#define DEBUG 0
+// Can't call cprintf from inside these routines,
+// because cprintf uses them itself.
+#define cprintf dont_use_cprintf
 
 extern int use_console_lock;
 
 int
 getcallerpc(void *v) {
   return ((int*)v)[-1];
 }
 
 void
 acquire1(struct spinlock * lock, struct proc *cp)
 {
-  if(DEBUG) cprintf("cpu%d: acquiring at %x\n", cpu(), getcallerpc(&lock));
-  cli();
-  while(cmpxchg(0, 1, &lock->locked) == 1) { ; }
+  if(cpus[cpu()].nlock++ == 0)
+    cli();
+  while(cmpxchg(0, 1, &lock->locked) == 1)
+    ;
+  cpuid(0, 0, 0, 0, 0);  // memory barrier
   lock->locker_pc = getcallerpc(&lock);
-  if(cp)
-    cp->locks += 1;
-  if(DEBUG) cprintf("cpu%d: acquired at %x\n", cpu(), getcallerpc(&lock));
 }
 
 void
 release1(struct spinlock * lock, struct proc *cp)
 {
-  if(DEBUG) cprintf("cpu%d: releasing at %x\n", cpu(), getcallerpc(&lock));
-  if(cp)
-    cp->locks -= 1;
-  cmpxchg(1, 0, &lock->locked);
-  sti();
+  if(lock->locked != 1)
+    panic("release");
+  cpuid(0, 0, 0, 0, 0);  // memory barrier
+  lock->locked = 0;
+  if(--cpus[cpu()].nlock == 0)
+    sti();
 }
@@ -56,3 +49,4 @@ release(struct spinlock *lock)
 {
   release1(lock, curproc[cpu()]);
 }
+
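
For reference, the lock body maps onto portable primitives: cmpxchg(0, 1, &lock->locked) is an atomic compare-and-swap, and the cpuid(0, 0, 0, 0, 0) calls exploit cpuid being a serializing instruction to get a memory barrier. A self-contained user-space model with GCC's __sync builtins standing in for both (an assumption for illustration; the commit itself uses neither builtin):

#include <stdio.h>

typedef struct { volatile int locked; } spinlock_t;

static void spin_acquire(spinlock_t *lk)
{
  /* spin until we are the one to swap 0 -> 1 */
  while (__sync_val_compare_and_swap(&lk->locked, 0, 1) == 1)
    ;
  __sync_synchronize(); /* plays the role of cpuid(): memory barrier */
}

static void spin_release(spinlock_t *lk)
{
  __sync_synchronize(); /* barrier before the unlock becomes visible */
  lk->locked = 0;       /* plain store, like the new release1() */
}

int main(void)
{
  spinlock_t lk = {0};

  spin_acquire(&lk);
  puts("in critical section");
  spin_release(&lk);
  return 0;
}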

syscall.c
@@ -34,8 +34,9 @@ fetchint(struct proc *p, unsigned addr, int *ip)
   return 0;
 }
 
+// This arg is void* so that both int* and uint* can be passed.
 int
-fetcharg(int argno, int *ip)
+fetcharg(int argno, void *ip)
 {
   unsigned esp;

trap.c
@@ -36,11 +36,6 @@ trap(struct Trapframe *tf)
 {
   int v = tf->tf_trapno;
 
-  if(cpus[cpu()].clis){
-    cprintf("cpu %d v %d eip %x\n", cpu(), v, tf->tf_eip);
-    panic("interrupt while interrupts are off");
-  }
-
   if(v == T_SYSCALL){
     struct proc *cp = curproc[cpu()];
     int num = cp->tf->tf_regs.reg_eax;
@@ -56,12 +51,10 @@ trap(struct Trapframe *tf)
       panic("trap ret but not RUNNING");
     if(tf != cp->tf)
       panic("trap ret wrong tf");
-    if(cp->locks){
+    if(cpus[cpu()].nlock){
       cprintf("num=%d\n", num);
       panic("syscall returning locks held");
     }
-    if(cpus[cpu()].clis)
-      panic("syscall returning but clis != 0");
     if((read_eflags() & FL_IF) == 0)
       panic("syscall returning but FL_IF clear");
     if(read_esp() < (unsigned) cp->kstack ||
@@ -75,7 +68,7 @@ trap(struct Trapframe *tf)
   if(v == (IRQ_OFFSET + IRQ_TIMER)){
     struct proc *cp = curproc[cpu()];
     lapic_timerintr();
-    if(cp && cp->locks)
+    if(cpus[cpu()].nlock)
       panic("timer interrupt while holding a lock");
     if(cp){
 #if 1

trapasm.S
 #include "mmu.h"
 
 .text
-.globl alltraps
 .globl trap
+.globl trapret1
+.globl alltraps
 alltraps:
 	/* vectors.S sends all traps here */
 	pushl %ds # build
@@ -16,11 +18,11 @@ alltraps:
 	addl $4, %esp
 	# return falls through to trapret...
-.globl trapret
 	/*
 	 * a forked process RETs here
 	 * expects ESP to point to a Trapframe
 	 */
+.globl trapret
 trapret:
 	popal
 	popl %es
@@ -28,6 +30,10 @@ trapret:
 	addl $0x8, %esp /* trapno and errcode */
 	iret
 
+.globl forkret
+forkret:
+	call forkret1
+	jmp trapret
+
 .globl acpu
 acpu:
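
The assembly side completes the picture of how a forked child now starts: scheduler() longjmps to the child's jmpbuf, whose saved eip points at forkret; forkret calls forkret1() in proc.c to release proc_table_lock, then jumps to trapret to pop the trapframe. A user-space model of that control flow, with plain function calls standing in for longjmp and the trampoline (illustrative only; these are not the kernel routines):

#include <stdio.h>

static void trapret(void)  { puts("trapret: pop trapframe, iret to user space"); }
static void forkret1(void) { puts("forkret1: release(&proc_table_lock)"); }

/* what np->jmpbuf.jb_eip now points at, instead of trapret directly */
static void forkret(void)
{
  forkret1(); /* child still holds proc_table_lock handed over by scheduler() */
  trapret();  /* continue through the usual trap-return path */
}

int main(void)
{
  /* scheduler(): longjmp(&p->jmpbuf) lands a new child here */
  void (*jb_eip)(void) = forkret;
  jb_eip();
  return 0;
}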

x86.h
@@ -29,6 +29,8 @@ static __inline uint32_t read_ebp(void) __attribute__((always_inline));
 static __inline uint32_t read_esp(void) __attribute__((always_inline));
 static __inline void cpuid(uint32_t info, uint32_t *eaxp, uint32_t *ebxp, uint32_t *ecxp, uint32_t *edxp);
 static __inline uint64_t read_tsc(void) __attribute__((always_inline));
+static __inline void cli(void) __attribute__((always_inline));
+static __inline void sti(void) __attribute__((always_inline));
 
 static __inline void
 breakpoint(void)
@@ -304,6 +306,18 @@ read_tsc(void)
   return tsc;
 }
 
+static __inline void
+cli(void)
+{
+  __asm__ volatile("cli");
+}
+
+static __inline void
+sti(void)
+{
+  __asm__ volatile("sti");
+}
+
 struct PushRegs {
   /* registers as pushed by pusha */
   uint32_t reg_edi;