Commit 6ca8f6fd (xv6-public)
Authored Apr 14, 2012 by Silas Boyd-Wickizer

First pass at Nickolai's exec hack.

I think i got it right..if it's buggy you can disable with EXECSWITCH 0 in param.h.

Parent commit: a6b4886d
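Reading the diff below: sys_exec now stashes its arguments in the proc structure and yields; when the scheduler re-adds the process (addrun), it calls execswitch, which builds a fresh kernel stack whose saved context "returns" into execstub, and execstub runs the real exec (doexec) on that new stack while the old stack is handed to a work queue for freeing. As a rough user-level analogue of that stack hand-off, here is a minimal sketch using POSIX ucontext in place of the kernel's context/trapframe machinery; run_exec, the 64 KB stack size, and the malloc'd stack are illustrative and not part of the commit.

// User-level analogy of execswitch()/execstub(): allocate a fresh stack,
// point a context's entry point at a stub, switch to it, then reclaim the
// old stack. Uses POSIX ucontext, not the kernel's context/trapframe types.
#include <ucontext.h>
#include <cstdio>
#include <cstdlib>

static ucontext_t main_ctx, exec_ctx;

// Stands in for execstub(): runs on the freshly allocated stack.
static void run_exec()
{
  std::printf("running exec work on the new stack\n");
  // On return, uc_link sends control back to main_ctx.
}

int main()
{
  const size_t kstacksize = 64 * 1024;            // illustrative size
  char *kstack = static_cast<char *>(std::malloc(kstacksize));
  if (!kstack)
    return 1;

  getcontext(&exec_ctx);
  exec_ctx.uc_stack.ss_sp = kstack;               // like installing p->kstack
  exec_ctx.uc_stack.ss_size = kstacksize;
  exec_ctx.uc_link = &main_ctx;                   // where to resume afterwards
  makecontext(&exec_ctx, run_exec, 0);            // like cntxt->rip = execstub

  swapcontext(&main_ctx, &exec_ctx);              // switch stacks, run the stub
  std::free(kstack);                              // reclaim the old stack
  return 0;
}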
Showing 7 changed files with 111 additions and 16 deletions:

include/kernel.hh    +5    -0
include/proc.hh      +3    -0
kernel/exec.cc       +0   -11
kernel/proc.cc      +80    -0
kernel/sched.cc      +3    -1
kernel/sysfile.cc   +19    -3
param.h              +1    -1
include/kernel.hh

@@ -175,6 +175,7 @@ int pipewrite(struct pipe*, const char*, int);
 // proc.c
 struct proc*    copyproc(struct proc*);
 void            finishproc(struct proc*, bool removepid = true);
+void            execswitch(proc *p);
 void            exit(void);
 int             fork(int);
 int             growproc(int);
@@ -219,6 +220,10 @@ int fetchmem(void*, const void*, u64);
 int             putmem(void*, const void*, u64);
 u64             syscall(u64 a0, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 num);
+
+// sysfile.cc
+int             doexec(const char *upath, userptr<userptr<const char> > uargv);
+
 // string.c
 extern "C" int  memcmp(const void*, const void*, size_t);
 void*           memmove(void*, const void*, size_t);
include/proc.hh

@@ -84,6 +84,9 @@ struct proc : public rcu_freed, public sched_link {
   int run_cpuid_;
   int in_exec_;
   int uaccess_;
+  const char *upath;
+  userptr<userptr<const char> > uargv;
+
   u8 __cxa_eh_global[16];
   std::atomic<int> exception_inuse;
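These two fields carry a small hand-off protocol: sys_exec fills in uargv (and exec_cpuid_), issues barrier(), and only then sets upath, which addrun later tests as the "switch stacks before running" flag. Below is a user-level sketch of the same publish-then-flag pattern, using C++11 atomics with release/acquire ordering in place of the kernel's barrier(); the names are illustrative, not kernel API.

// Publish the payload first, then set the flag; the consumer that sees the
// flag is guaranteed to also see the payload.
#include <atomic>
#include <thread>
#include <cstdio>

static const char *g_uargv_stub = nullptr;            // payload, like proc::uargv
static std::atomic<const char *> g_upath{nullptr};    // the flag, like proc::upath

static void producer()        // plays the role of sys_exec
{
  g_uargv_stub = "argv goes here";                     // fill payload first
  g_upath.store("/bin/ls", std::memory_order_release); // then publish the flag
}

static void consumer()        // plays the role of addrun/execswitch
{
  const char *upath;
  while ((upath = g_upath.load(std::memory_order_acquire)) == nullptr)
    std::this_thread::yield();                         // spin until flag is set
  std::printf("saw upath=%s, uargv=%s\n", upath, g_uargv_stub);
}

int main()
{
  std::thread c(consumer), p(producer);
  p.join();
  c.join();
  return 0;
}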
kernel/exec.cc

@@ -157,17 +157,6 @@ exec(const char *path, char **argv, void *ascopev)
   cwork *w;
   long sp;

-  myproc()->exec_cpuid_ = mycpuid();
   mt_ascope *ascope = (mt_ascope*) ascopev;
-  ascope->close();
-  myproc()->in_exec_ = 1;
-  yield();
-  myproc()->in_exec_ = 0;
-  ascope->open("sys_exec2(%s)", path);
-  myproc()->run_cpuid_ = mycpuid();

   if ((ip = namei(myproc()->cwd, path)) == 0)
     return -1;
kernel/proc.cc

@@ -42,6 +42,7 @@ proc::proc(int npid) :
   ftable(0), cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
   cpu_pin(0), oncv(0), cv_wakeup(0),
   user_fs_(0), unmap_tlbreq_(0), in_exec_(0), uaccess_(0),
+  upath(0), uargv(userptr<const char>(nullptr)),
   exception_inuse(0), magic(PROC_MAGIC), state_(EMBRYO)
 {
   snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);

@@ -204,6 +205,85 @@ freeproc(struct proc *p)
   gc_delayed(p);
 }

+void
+execstub(void)
+{
+  userptr<userptr<const char> > uargv;
+  const char *upath;
+
+  upath = myproc()->upath;
+  uargv = myproc()->uargv;
+  barrier();
+  myproc()->upath = nullptr;
+
+  post_swtch();
+
+  myproc()->run_cpuid_ = mycpuid();
+  long r = doexec(upath, uargv);
+  myproc()->tf->rax = r;
+
+  // This stuff would have been called in syscall and syscall_c
+  // if we returned from the the previous kstack
+  mtstop(myproc());
+  mtign();
+
+  if (myproc()->killed) {
+    mtstart(trap, myproc());
+    exit();
+  }
+}
+
+static void
+kstackfree(void *kstack)
+{
+  ksfree(slab_stack, kstack);
+}
+
+void
+execswitch(proc *p)
+{
+  // Alloc a new kernel stack, set it up, and free the old one
+  context *cntxt;
+  trapframe *tf;
+  char *kstack;
+  char *sp;
+
+  if ((kstack = (char*) ksalloc(slab_stack)) == 0)
+    panic("execswitch: ksalloc");
+
+  sp = kstack + KSTACKSIZE;
+  sp -= sizeof(*p->tf);
+  tf = (trapframe*) sp;
+  // XXX(sbw) we only need the whole tf if exec fails
+  *tf = *p->tf;
+
+  sp -= 8;
+  // XXX(sbw) we could use the sysret return path
+  *(u64*) sp = (u64) trapret;
+
+  sp -= sizeof(*p->context);
+  cntxt = (context*) sp;
+  memset(cntxt, 0, sizeof(*cntxt));
+  cntxt->rip = (uptr) execstub;
+
+  cwork *w = new cwork();
+  if (w != nullptr) {
+    w->rip = (void*) kstackfree;
+    w->arg0 = p->kstack;
+    if (wqcrit_push(w, myproc()->exec_cpuid_) < 0) {
+      ksfree(slab_stack, p->kstack);
+      delete w;
+    }
+  } else {
+    ksfree(slab_stack, p->kstack);
+  }
+
+  p->kstack = kstack;
+  p->context = cntxt;
+  p->tf = tf;
+}
+
 proc*
 proc::alloc(void)
 {
kernel/sched.cc

@@ -227,7 +227,9 @@ scheddump(void)
 void
 addrun(struct proc *p)
 {
   ANON_REGION(__func__, &perfgroup);
+  if (p->upath)
+    execswitch(p);
   p->set_state(RUNNABLE);
   schedule_[p->cpuid].enq(p);
 }
kernel/sysfile.cc

@@ -430,9 +430,8 @@ sys_chdir(const char *path)
   return 0;
 }

-//SYSCALL
 int
-sys_exec(const char *upath, userptr<userptr<const char> > uargv)
+doexec(const char *upath, userptr<userptr<const char> > uargv)
 {
   ANON_REGION(__func__, &perfgroup);
   char *argv[MAXARG];

@@ -459,16 +458,33 @@ sys_exec(const char *upath, userptr<userptr<const char> > uargv)
     if (argv[i] == nullptr || fetchstr(argv[i], (char*) uarg, MAXARGLEN) < 0)
       goto clean;
   }
   argv[i] = 0;
   r = exec(path, argv, &ascope);
 clean:
-  for (i = i-i; i >= 0; i--)
+  for (i = i-1; i >= 0; i--)
     kmfree(argv[i], MAXARGLEN);
   return r;
 }

+//SYSCALL
+int
+sys_exec(const char *upath, userptr<userptr<const char> > uargv)
+{
+#if EXECSWITCH
+  myproc()->exec_cpuid_ = mycpuid();
+  myproc()->uargv = uargv;
+  barrier();
+  // upath serves as a flag to the scheduler
+  myproc()->upath = upath;
+  yield();
+  myproc()->upath = nullptr;
+#endif
+  return doexec(upath, uargv);
+}
+
 //SYSCALL
 int
 sys_pipe(int *fd)
 {
   struct file *rf, *wf;
param.h

@@ -24,7 +24,7 @@
 #define USERWQSIZE (1 << 14)
 #define USTACKPAGES 8
 #define WQSHIFT 7
-#define CILKENABLE 0
+#define EXECSWITCH 1
 #if defined(HW_qemu)
 #define NCPU 8 // maximum number of CPUs
 #define MTRACE 0
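Per the commit message, the whole mechanism can be switched off again by flipping this knob in param.h, in which case sys_exec compiles down to a plain call to doexec:

#define EXECSWITCH 0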