xv6-public
Commit 6ed206c5
Authored Oct 25, 2011 by Silas Boyd-Wickizer

CP -- scheduler 64-bit stuff..

Parent: a9766c8f

Showing 11 changed files with 248 additions and 67 deletions
Makefile       +1    -0
cpu.h          +4    -5
kernel.h       +12   -0
main.c         +5    -2
mmu.h          +5    -0
proc.c         +151  -34
swtch.S        +5    -0
trap.c         +11   -3
vm.c           +28   -23
x86.h          +23   -0
xv6-mtrace.h   +3    -0
Makefile

@@ -14,6 +14,7 @@ OBJS = \
 	proc.o \
 	rcu.o \
 	spinlock.o \
+	swtch.o \
 	string.o \
 	uart.o \
 	vm.o \
...
cpu.h

@@ -7,17 +7,16 @@ struct cpu {
  int ncli;                    // Depth of pushcli nesting.
  int intena;                  // Were interrupts enabled before pushcli?
  struct segdesc gdt[NSEGS];   // x86 global descriptor table
  struct taskstate ts;         // Used by x86 to find stack for interrupt
  struct context *scheduler;   // swtch() here to enter scheduler
  u64 last_rcu_gc_ticks;

  // Cpu-local storage variables; see below
  struct cpu *cpu;
  struct proc *proc;           // The currently-running process.
  struct kmem *kmem;           // The per-core memory table

#if 0
  struct context *scheduler;   // swtch() here to enter scheduler
  struct taskstate ts;         // Used by x86 to find stack for interrupt
  volatile uint booted;        // Has the CPU started?
  int last_rcu_gc_ticks;
#endif
} __mpalign__;
...
kernel.h

#include "mmu.h"

#define KBASE 0xFFFFFFFF80000000ull
#define PBASE 0xFFFFFF0000000000ull
...
@@ -13,6 +15,7 @@ static inline void *p2v(uptr a) { return (void *) a + KBASE; }
struct spinlock;
struct condvar;
struct context;
struct vmnode;
struct inode;
struct proc;
...
@@ -151,6 +154,13 @@ int strncmp(const char*, const char*, u32);
char* strncpy(char*, const char*, int);
int strcmp(const char *p, const char *q);

// swtch.S
void swtch(struct context**, struct context*);

// trap.c
extern struct segdesc bootgdt[NSEGS];
extern u64 ticks;

// uart.c
void uartputc(char c);
...
@@ -162,3 +172,5 @@ int vmap_insert(struct vmap*, struct vmnode *, uptr);
struct vma * vmap_lookup(struct vmap*, uptr, uptr);
int copyout(struct vmap*, uptr, void*, u64);
void vmn_free(struct vmnode*);
void switchuvm(struct proc*);
void switchkvm(void);
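Given the p2v shown in the hunk header above (p2v(a) = a + KBASE), physical and kernel-virtual addresses differ by a fixed offset. A quick standalone sanity check of that direct-map arithmetic; v2p is assumed here to be simply the inverse of p2v, since its definition is not part of this diff:

#include <stdio.h>
#include <stdint.h>

#define KBASE 0xFFFFFFFF80000000ull   // as defined in kernel.h above

// Mirrors kernel.h's p2v(a) = a + KBASE; v2p_addr is the assumed inverse.
static inline uint64_t p2v_addr(uint64_t pa) { return pa + KBASE; }
static inline uint64_t v2p_addr(uint64_t va) { return va - KBASE; }

int main(void)
{
  // Physical 1 MB sits at KBASE + 1 MB in the kernel's direct map.
  printf("%llx\n", (unsigned long long)p2v_addr(0x100000));              // ffffffff80100000
  printf("%llx\n", (unsigned long long)v2p_addr(0xffffffff80100000ull)); // 100000
  return 0;
}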
main.c

@@ -27,8 +27,11 @@ static volatile int bstate;
void
mpboot(void)
{
  // XXX(sbw) load VA for gdt, etc
  initseg();
  initlapic();
  inittls();
  scheduler();     // start running processes
  bstate = 1;
  panic("mpboot");
}
...
@@ -74,8 +77,8 @@ cmain(void)
  initcga();
  initconsole();
  inittrap();
- initseg();
  initpg();
+ initseg();
  initmp();
  initlapic();
  inittls();
...
mmu.h

@@ -118,6 +118,11 @@ struct desctr
{
  u16 limit;
  u64 base;
} __attribute__((packed, aligned(16)));

struct taskstate
{
  u64 rsp0;
} __attribute__((packed));

#endif

#define INT_P (1<<7)   /* interrupt descriptor present */
...
proc.c

...
@@ -19,6 +19,8 @@ struct ns *nspid __mpalign__;
struct ns *nsrunq __mpalign__;
static struct proc *bootproc __mpalign__;

enum { sched_debug = 0 };

void
sched(void)
{
...
@@ -46,6 +48,21 @@ addrun(struct proc *p)
  p->on_runq = p->cpuid;
}

void
delrun(struct proc *p)
{
#if SPINLOCK_DEBUG
  if (!holding(&p->lock))
    panic("delrun no p->lock");
#endif

  if (p->on_runq < 0)
    panic("delrun not on runq");
  if (ns_remove(nsrunq, KI(p->on_runq), p) == 0)
    panic("delrun: ns_remove");
  p->on_runq = -1;
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
static void
...
@@ -184,6 +201,140 @@ initproc(void)
    idle[c] = 1;
}

// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
static void *
choose_runnable(void *pp, void *arg)
{
  struct proc *p = pp;
  if (p->state == RUNNABLE)
    return p;
  return 0;
}

static void *
steal_cb(void *vk, void *v, void *arg)
{
  struct proc *p = v;

  acquire(&p->lock);
  if (p->state != RUNNABLE || p->cpuid == mycpu()->id || p->cpu_pin) {
    release(&p->lock);
    return 0;
  }

  if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
    if (sched_debug)
      cprintf("cpu%d: steal %d (cycles=%d) from %d\n",
              mycpu()->id, p->pid, (int)p->curcycles, p->cpuid);
    delrun(p);
    p->curcycles = 0;
    p->cpuid = mycpu()->id;
    addrun(p);
    release(&p->lock);
    return p;
  }

  release(&p->lock);
  return 0;
}

int
steal(void)
{
  void *stole = ns_enumerate(nsrunq, steal_cb, 0);
  return stole ? 1 : 0;
}
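ns_enumerate walks every (key, value) pair in a name space and stops at the first callback that returns non-NULL, which is how steal() above turns "find one stealable process" into a single traversal. A minimal sketch of that callback pattern, with a hypothetical count_runnable helper that is not in the commit; ns_enumerate's signature and the (void *vk, void *v, void *arg) callback shape are inferred from their use in steal():

// Hypothetical callback in the same style as steal_cb(); returning 0
// keeps the enumeration going, so this visits every process on the runq.
// Assumes the usual proc.h declarations (struct proc, RUNNABLE, nsrunq).
static void *
count_runnable(void *vk, void *v, void *arg)
{
  struct proc *p = v;
  int *n = arg;
  if (p->state == RUNNABLE)
    (*n)++;
  return 0;   // never stop early
}

int
runnable_count(void)
{
  int n = 0;
  ns_enumerate(nsrunq, count_runnable, &n);   // third arg becomes 'arg' above
  return n;
}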
void
scheduler(void)
{
  // allocate a fake PID for each scheduler thread
  struct proc *schedp = allocproc();
  if (!schedp)
    panic("scheduler allocproc");

  mycpu()->proc = schedp;
  myproc()->cpu_pin = 1;

  // Enabling mtrace calls in scheduler generates many mtrace_call_entrys.
  // mtrace_call_set(1, cpu->id);
  mtrace_kstack_start(scheduler, schedp);

  for (;;) {
    // Enable interrupts on this processor.
    sti();

    struct proc *p = ns_enumerate_key(nsrunq, KI(mycpu()->id),
                                      choose_runnable, 0);

    if (p) {
      acquire(&p->lock);
      if (p->state != RUNNABLE) {
        release(&p->lock);
      } else {
        if (idle[mycpu()->id])
          idle[mycpu()->id] = 0;

        // Switch to chosen process.  It is the process's job
        // to release proc->lock and then reacquire it
        // before jumping back to us.
        mycpu()->proc = p;
        switchuvm(p);
        p->state = RUNNING;
        p->tsc = rdtsc();

        mtrace_kstack_pause(schedp);
        if (p->context->rip != (uptr)forkret &&
            p->context->rip != (uptr)rcu_gc_worker)
        {
          mtrace_kstack_resume(proc);
        }
        mtrace_call_set(1, mycpu()->id);

        swtch(&mycpu()->scheduler, myproc()->context);
        mtrace_kstack_resume(schedp);
        mtrace_call_set(0, mycpu()->id);
        switchkvm();

        // Process is done running for now.
        // It should have changed its p->state before coming back.
        mycpu()->proc = schedp;
        if (p->state != RUNNABLE)
          delrun(p);
        release(&p->lock);
      }
    } else {
      if (steal()) {
        if (idle[mycpu()->id])
          idle[mycpu()->id] = 0;
      } else {
        if (!idle[mycpu()->id])
          idle[mycpu()->id] = 1;
      }
    }

    int now = ticks;
    if (now - mycpu()->last_rcu_gc_ticks > 100) {
      rcu_gc();
      mycpu()->last_rcu_gc_ticks = now;
    }

    if (idle[mycpu()->id]) {
      sti();
      hlt();
    }
  }
}
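swtch(struct context**, struct context*) saves the callee-saved state of the current thread, parks a pointer to it through the first argument, and resumes the context passed as the second; that is the handshake between scheduler() above and the running process. This commit stubs the 64-bit swtch (see swtch.S below), so the following is only a hedged sketch of the process side of the contract, mirroring the lock protocol described in the scheduler comment; it is not code from the commit:

// Sketch of a yield in this per-proc-lock scheme (hypothetical helper).
// The scheduler acquired p->lock before dispatching us and expects it
// held again when we swtch back; it releases it after we stop running.
void
yield_to_scheduler(void)
{
  acquire(&myproc()->lock);
  myproc()->state = RUNNABLE;
  swtch(&myproc()->context,      // save our context here...
        mycpu()->scheduler);     // ...and resume the scheduler's context
  release(&myproc()->lock);      // back from a later dispatch
}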
...
@@ -634,40 +785,6 @@ migrate(struct proc *p)
}
}
-static void *
-steal_cb(void *vk, void *v, void *arg)
-{
-  struct proc *p = v;
-
-  acquire(&p->lock);
-  if (p->state != RUNNABLE || p->cpuid == cpu->id || p->cpu_pin) {
-    release(&p->lock);
-    return 0;
-  }
-
-  if (p->curcycles == 0 || p->curcycles > MINCYCTHRESH) {
-    if (sched_debug)
-      cprintf("cpu%d: steal %d (cycles=%d) from %d\n",
-              cpu->id, p->pid, (int)p->curcycles, p->cpuid);
-    delrun(p);
-    p->curcycles = 0;
-    p->cpuid = cpu->id;
-    addrun(p);
-    release(&p->lock);
-    return p;
-  }
-  release(&p->lock);
-  return 0;
-}
-
-int
-steal(void)
-{
-  void *stole = ns_enumerate(nsrunq, steal_cb, 0);
-  return stole ? 1 : 0;
-}
//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
...
swtch.S

@@ -7,6 +7,8 @@
.globl swtch
swtch:
        jmp swtch        # placeholder: spin here (32-bit body disabled below)

#if 0
        movl 4(%esp), %eax
        movl 8(%esp), %edx
...
@@ -26,3 +28,5 @@ swtch:
        popl %ebx
        popl %ebp
        ret
#endif
\ No newline at end of file
trap.c

#include "types.h"
#include "param.h"
#include "mmu.h"
#include "kernel.h"
#include "x86.h"
#include "cpu.h"

u64 ticks __mpalign__;

struct segdesc __attribute__((aligned(16))) bootgdt[NSEGS] = {
  // null
...
@@ -48,13 +52,17 @@ void
initseg(void)
{
  volatile struct desctr dtr;
  struct cpu *c;

  dtr.limit = sizeof(idt) - 1;
  dtr.base = (u64)idt;
  lidt((void *)&dtr.limit);

  // Reload GDT from kernel VA
  dtr.limit = sizeof(bootgdt) - 1;
  dtr.base = (u64)bootgdt;

  // TLS might not be ready
  c = &cpus[cpunum()];

  // Load per-CPU GDT
  memmove(c->gdt, bootgdt, sizeof(bootgdt));
  dtr.limit = sizeof(c->gdt) - 1;
  dtr.base = (u64)c->gdt;
  lgdt((void *)&dtr.limit);
}
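lidt and lgdt are handed (void *)&dtr.limit because the x86 pseudo-descriptor they consume is 10 bytes: a 2-byte limit immediately followed by an 8-byte base, which is exactly what the packed attribute on struct desctr (see mmu.h above) guarantees. A compile-time check one could add to make that layout assumption explicit; this assert is hypothetical and not in the commit:

#include <stddef.h>   // offsetof

// Local mirror of mmu.h's desctr, to make the layout assumption concrete.
struct desctr {
  unsigned short limit;
  unsigned long long base;
} __attribute__((packed, aligned(16)));

// If packing were ever dropped, base would move to offset 8 and
// lidt/lgdt would read garbage; this catches that at compile time.
_Static_assert(offsetof(struct desctr, base) == 2,
               "desctr must match the 10-byte lidt/lgdt format");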
vm.c

...
@@ -96,14 +96,41 @@ static pml4e_t*
setupkvm(void)
{
  pml4e_t *pml4;
  int k;

  if ((pml4 = (pml4e_t *)kalloc()) == 0)
    return 0;
  memmove(pml4, kpml4, PGSIZE);
  k = 512 - PML4X(PBASE);
  memmove(&pml4[k], &kpml4[k], 8 * (512 - k));
  return pml4;
}

// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  lcr3(v2p(kpml4));   // switch to the kernel page table
}

// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  u64 base = (u64)&mycpu()->ts;
  pushcli();
  mycpu()->gdt[TSSSEG>>3] = (struct segdesc)
    SEGDESC(base, (sizeof(mycpu()->ts)-1), SEG_P|SEG_TSS64A);
  mycpu()->gdt[(TSSSEG>>3)+1] = (struct segdesc) SEGDESCHI(base);
  mycpu()->ts.rsp0 = (u64)myproc()->kstack + KSTACKSIZE;
  ltr(TSSSEG);
  if (p->vmap == 0 || p->vmap->pml4 == 0)
    panic("switchuvm: no vmap/pml4");
  lcr3(v2p(p->vmap->pml4));   // switch to new address space
  popcli();
}
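In 64-bit mode a TSS descriptor is 16 bytes, so switchuvm() above fills two consecutive 8-byte GDT slots: SEGDESC builds the low half and SEGDESCHI carries bits 32-63 of the base. Those macros are not shown in this diff, so the following is only an illustrative stand-in under the standard x86-64 system-descriptor layout, not the commit's actual macros:

#include <stdint.h>

// Hypothetical equivalent of SEGDESC/SEGDESCHI: build the two 8-byte
// words of a 64-bit available-TSS descriptor (type 0x9, present).
static void
tss_desc_split(uint64_t base, uint32_t limit, uint64_t *lo, uint64_t *hi)
{
  *lo = (uint64_t)(limit & 0xffff)                 // limit[0:15]
      | ((base & 0xffffff) << 16)                  // base[0:23]
      | ((uint64_t)0x89 << 40)                     // P=1, type=0x9 (TSS)
      | ((uint64_t)((limit >> 16) & 0xf) << 48)    // limit[16:19]
      | (((base >> 24) & 0xff) << 56);             // base[24:31]
  *hi = base >> 32;                                // base[32:63]; upper dword 0
}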
static struct vma *
vma_alloc(void)
{
...
@@ -498,29 +525,7 @@ vmenable(void)
__asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (SEG_KCODE << 3)); // reload cs
}
-// Switch h/w page table register to the kernel-only page table,
-// for when no process is running.
-void
-switchkvm(void)
-{
-  lcr3(v2p(kpgdir));   // switch to the kernel page table
-}
-
-// Switch TSS and h/w page table to correspond to process p.
-void
-switchuvm(struct proc *p)
-{
-  pushcli();
-  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
-  cpu->gdt[SEG_TSS].s = 0;
-  cpu->ts.ss0 = SEG_KDATA << 3;
-  cpu->ts.esp0 = (uint) proc->kstack + KSTACKSIZE;
-  ltr(SEG_TSS << 3);
-  if(p->vmap == 0 || p->vmap->pgdir == 0)
-    panic("switchuvm: no vmap/pgdir");
-  lcr3(v2p(p->vmap->pgdir));   // switch to new address space
-  popcli();
-}
// Free a page table and all the physical memory pages
// in the user part.
...

x86.h

@@ -83,6 +83,12 @@ lgdt(void *p)
}

static inline void
ltr(u16 sel)
{
  __asm volatile("ltr %0" : : "r" (sel));
}

static inline void
writegs(u16 v)
{
  __asm volatile("movw %0, %%gs" : : "r" (v));
...
@@ -104,6 +110,23 @@ writemsr(u32 msr, u64 val)
  __asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}

static inline u64
rdtsc(void)
{
  u32 hi, lo;
  __asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
  return ((u64)lo) | (((u64)hi) << 32);
}
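rdtsc returns the timestamp counter in EDX:EAX, so the two 32-bit halves are reassembled into a u64. The scheduler stamps p->tsc with it at dispatch, and steal_cb() compares p->curcycles against MINCYCTHRESH; a hedged sketch of that accounting pattern, where run_one_slice is a hypothetical stand-in and the update of curcycles is assumed rather than shown in this diff:

// Sketch only: cycle accounting in the style of scheduler()/steal_cb().
static void
run_one_slice(struct proc *p)
{
  p->tsc = rdtsc();                  // stamp at dispatch, as scheduler() does
  // ... swtch() into the process, which later swtch()es back ...
  p->curcycles += rdtsc() - p->tsc;  // steal_cb() checks this vs MINCYCTHRESH
}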
static inline void
hlt(void)
{
  __asm volatile("hlt");
}

static inline void
lcr3(u64 val)
{
  __asm volatile("movq %0,%%cr3" : : "r" (val));
}

// Layout of the trap frame built on the stack by the
// hardware and by trapasm.S, and passed to trap().
struct trapframe {
...
xv6-mtrace.h

...
@@ -22,4 +22,7 @@ char* strncpy(char *s, const char *t, int n);
 #define mtrace_label_register(t, r, x, y, z, ip)
 #define mtrace_kstack_start(x, y)
 #define mtrace_kstack_stop(x)
+#define mtrace_kstack_pause(x)
+#define mtrace_kstack_resume(x)
+#define mtrace_call_set(x, y)
 #endif