Commit ccb4db66 — xv6-public
Author: Silas Boyd-Wickizer
Date:   Oct 25, 2011
Parent: 71fbc8bc

    yield, exit, part of trap, and pagefault stub.

Showing 3 changed files with 232 additions and 6 deletions:
  proc.c   +63   -0
  trap.c   +119  -6
  vm.c     +50   -0

proc.c
@@ -48,6 +48,16 @@ sched(void)
  mycpu()->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&myproc()->lock);  //DOC: yieldlock
  myproc()->state = RUNNABLE;
  sched();
  release(&myproc()->lock);
}

// Mark a process RUNNABLE and add it to the runq
// of its cpu. Caller must hold p->lock so that
// some other core doesn't start running the

@@ -104,6 +114,59 @@ forkret(void)
  // Return to "caller", actually trapret (see allocproc).
}

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p, *np;
  int fd;
  int wakeupinit;

  if(myproc() == bootproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(myproc()->ofile[fd]){
      fileclose(myproc()->ofile[fd]);
      myproc()->ofile[fd] = 0;
    }
  }

  iput(myproc()->cwd);
  myproc()->cwd = 0;

  // Pass abandoned children to init.
  wakeupinit = 0;
  SLIST_FOREACH_SAFE(p, &(myproc()->childq), child_next, np) {
    acquire(&p->lock);
    p->parent = bootproc;
    if(p->state == ZOMBIE)
      wakeupinit = 1;
    SLIST_REMOVE(&(myproc()->childq), p, proc, child_next);
    release(&p->lock);

    acquire(&bootproc->lock);
    SLIST_INSERT_HEAD(&bootproc->childq, p, child_next);
    release(&bootproc->lock);
  }

  // Parent might be sleeping in wait().
  acquire(&(myproc()->lock));
  cv_wakeup(&(myproc()->parent->cv));
  if (wakeupinit)
    cv_wakeup(&bootproc->cv);

  // Jump into the scheduler, never to return.
  myproc()->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
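
The comment on exit() says an exited process stays in the zombie state until its parent calls wait(); wait() itself is not part of this commit. For orientation only, a minimal sketch of the consuming side is shown below. It assumes the condvar API provides a blocking cv_sleep(struct condvar *, struct spinlock *) to pair with cv_wakeup(), and uses a hypothetical freeproc() to stand in for whatever cleanup the real tree performs.

// Hypothetical wait(): reap one ZOMBIE child, or block on our condvar
// until exit() in a child calls cv_wakeup(&parent->cv).
// Sketch only; not the implementation in this repository.
int
wait(void)
{
  struct proc *p, *np;
  int pid;

  acquire(&myproc()->lock);
  for(;;){
    // Scan our children; exit() leaves finished ones in ZOMBIE state.
    SLIST_FOREACH_SAFE(p, &(myproc()->childq), child_next, np){
      acquire(&p->lock);
      if(p->state == ZOMBIE){
        pid = p->pid;
        SLIST_REMOVE(&(myproc()->childq), p, proc, child_next);
        release(&p->lock);
        release(&myproc()->lock);
        freeproc(p);                 // hypothetical resource cleanup
        return pid;
      }
      release(&p->lock);
    }
    if(SLIST_EMPTY(&(myproc()->childq))){
      release(&myproc()->lock);
      return -1;                     // nothing to wait for
    }
    // Sleep on our own condvar; exit() signals myproc()->parent->cv.
    cv_sleep(&(myproc()->cv), &(myproc()->lock));  // assumed API
  }
}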
trap.c
@@ -5,9 +5,18 @@
#include "x86.h"
#include "cpu.h"
#include "traps.h"
#include "queue.h"
#include "spinlock.h"
#include "condvar.h"
#include "proc.h"
#include "xv6-mtrace.h"
#include "bits.h"

u64 ticks __mpalign__;
static struct spinlock tickslock __mpalign__;
static struct condvar cv_ticks __mpalign__;

struct segdesc __attribute__((aligned(16))) bootgdt[NSEGS] = {
  // null
  [0]=SEGDESC(0, 0, 0),

@@ -31,12 +40,116 @@ extern u64 trapentry[];

void
trap(struct trapframe *tf)
{
  u32 no = tf->trapno;
  if (no == T_PGFLT)
    cprintf("va %lx rip %lx rsp %lx\n", rcr2(), tf->rip, tf->rsp);
  else
    cprintf("no %d rip %lx rsp %lx\n", tf->trapno, tf->rip, tf->rsp);
  panic("trap");

  // XXX(sbw) eventually these should be moved into trapasm.S
  writegs(KDSEG);
  writemsr(MSR_GS_BASE, (u64)&cpus[cpunum()].cpu);

  // XXX(sbw) sysenter/sysexit
#if 0
  if(tf->trapno == T_SYSCALL){
    if(proc->killed) {
      mtrace_kstack_start(trap, proc);
      exit();
    }
    proc->tf = tf;
    syscall();
    if(proc->killed) {
      mtrace_kstack_start(trap, proc);
      exit();
    }
    return;
  }
#endif

  if (myproc()->mtrace_stacks.curr >= 0)
    mtrace_kstack_pause(myproc());
  mtrace_kstack_start(trap, myproc());

  switch(tf->trapno){
  case T_IRQ0 + IRQ_TIMER:
    if(mycpu()->id == 0){
      acquire(&tickslock);
      ticks++;
      cv_wakeup(&cv_ticks);
      release(&tickslock);
    }
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE:
    ideintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE+1:
    // Bochs generates spurious IDE1 interrupts.
    break;
  case T_IRQ0 + IRQ_KBD:
#if 0
    kbdintr();
    lapiceoi();
#endif
    panic("IRQ_KBD");
    break;
  case T_IRQ0 + IRQ_COM1:
#if 0
    uartintr();
    lapiceoi();
#endif
    panic("IRQ_COM1");
    break;
  case T_IRQ0 + 7:
  case T_IRQ0 + IRQ_SPURIOUS:
    cprintf("cpu%d: spurious interrupt at %x:%x\n",
            mycpu()->id, tf->cs, tf->rip);
    lapiceoi();
  case T_TLBFLUSH:
    lapiceoi();
    lcr3(rcr3());
    break;

  //PAGEBREAK: 13
  default:
    if(myproc() == 0 || (tf->cs&3) == 0){
      // In kernel, it must be our mistake.
      cprintf("unexpected trap %d from cpu %d rip %lx (cr2=0x%lx)\n",
              tf->trapno, mycpu()->id, tf->rip, rcr2());
      panic("trap");
    }
    if (tf->trapno == T_PGFLT){
      if(pagefault(myproc()->vmap, rcr2(), tf->err) >= 0){
        mtrace_kstack_stop(myproc());
        if (myproc()->mtrace_stacks.curr >= 0)
          mtrace_kstack_resume(myproc());
        return;
      }
    }
    // In user space, assume process misbehaved.
    cprintf("pid %d %s: trap %d err %d on cpu %d "
            "eip 0x%x addr 0x%x--kill proc\n",
            myproc()->pid, myproc()->name, tf->trapno, tf->err,
            mycpu()->id, tf->rip, rcr2());
    myproc()->killed = 1;
  }

  // Force process exit if it has been killed and is in user space.
  // (If it is still executing in the kernel, let it keep running
  // until it gets to the regular system call return.)
  if(myproc() && myproc()->killed && (tf->cs&3) == 0x3)
    exit();

  // Force process to give up CPU on clock tick.
  // If interrupts were on while locks held, would need to check nlock.
  if(myproc() && myproc()->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
    yield();

  // Check if the process has been killed since we yielded
  if(myproc() && myproc()->killed && (tf->cs&3) == 0x3)
    exit();

  mtrace_kstack_stop(myproc());
  if (myproc()->mtrace_stacks.curr >= 0)
    mtrace_kstack_resume(myproc());
}

void
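
The IRQ_TIMER case above increments ticks under tickslock (on CPU 0 only) and signals cv_ticks. A typical consumer of that protocol, such as a sleep-for-N-ticks helper, might look like the sketch below. It is not part of this commit; it assumes the condvar API provides a blocking cv_sleep(struct condvar *, struct spinlock *), and it would have to live in trap.c since both tickslock and cv_ticks are static there.

// Hypothetical helper: block the caller until n more timer ticks have
// been observed.  Sketch only, relying on the assumed cv_sleep().
void
sleep_ticks(u64 n)
{
  u64 start;

  acquire(&tickslock);
  start = ticks;
  while(ticks - start < n){
    if(myproc()->killed){
      release(&tickslock);
      return;
    }
    cv_sleep(&cv_ticks, &tickslock);   // woken by the IRQ_TIMER case in trap()
  }
  release(&tickslock);
}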
vm.c
@@ -339,10 +339,60 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len)
  return 0;
}

int
pagefault(struct vmap *vmap, u64 va, u32 err)
{
  cprintf("va %lx\n", va);
  panic("pagefault");
  return 0;

#if 0
  pte_t *pte = walkpgdir(vmap->pgdir, (const void *)va, 1);
  if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W)) { // optimize checks of args to syscalls
    return 0;
  }

  // cprintf("%d: pagefault 0x%x err 0x%x pte 0x%x\n", proc->pid, va, err, *pte);
  rcu_begin_read();
  struct vma *m = vmap_lookup(vmap, va, 1);
  if(m == 0) {
    // cprintf("pagefault: no vma\n");
    rcu_end_read();
    return -1;
  }

  acquire(&m->lock);
  uint npg = (PGROUNDDOWN(va) - m->va_start) / PGSIZE;
  // cprintf("%d: pagefault: valid vma 0x%x 0x%x %d (cow=%d)\n", proc->pid, m->va_start,
  //         m->va_type, COW);
  // if (m->n)
  //   cprintf("page %d 0x%x %d %d\n", npg, m->n->page[npg], m->n->type, ONDEMAND);

  if (m->n && m->n->type == ONDEMAND && m->n->page[npg] == 0) {
    m = pagefault_ondemand(vmap, va, err, m);
  }

  if (m->va_type == COW && (err & FEC_WR)) {
    if (pagefault_wcow(vmap, va, pte, m, npg) < 0) {
      release(&m->lock);
      rcu_end_read();
      return -1;
    }
  } else if (m->va_type == COW) {
    *pte = v2p(m->n->page[npg]) | PTE_P | PTE_U | PTE_COW;
  } else {
    if (m->n->ref != 1) {
      panic("pagefault");
    }
    *pte = v2p(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
  }
  lcr3(v2p(vmap->pgdir));  // Reload hardware page tables
  release(&m->lock);
  rcu_end_read();
  return 1;
#endif
}

#if 0
void
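
As committed, pagefault() is only a stub: it prints the faulting address, panics, and keeps the eventual COW/on-demand logic behind #if 0. For orientation, a minimal lazy zero-fill variant in the same style could look like the sketch below. It is not part of this commit; it assumes xv6-style kalloc()/kfree()/memset() are available in this tree and that walkpgdir() and the PTE_* flags behave as they do in the disabled block above.

// Hypothetical minimal handler: map one fresh zeroed page at the
// faulting address and report success to trap().  Sketch only.
int
pagefault_zerofill(struct vmap *vmap, u64 va, u32 err)
{
  char *mem;
  pte_t *pte;

  mem = kalloc();                          // assumed page allocator
  if(mem == 0)
    return -1;
  memset(mem, 0, PGSIZE);

  pte = walkpgdir(vmap->pgdir, (const void *)PGROUNDDOWN(va), 1);
  if(pte == 0){
    kfree(mem);
    return -1;
  }
  *pte = v2p(mem) | PTE_P | PTE_U | PTE_W;
  lcr3(v2p(vmap->pgdir));                  // reload hardware page tables
  return 1;                                // >= 0 tells trap() the fault was handled
}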