xv6-public
Commit 5be0039c
Authored Aug 10, 2006 by rtm
interrupts could be recursive since lapic_eoi() called before rti
so fast interrupts overflow the kernel stack
fix: cli() before lapic_eoi()
Parent: 8a8be1b8
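The failure mode the message describes: xv6's release() re-enables interrupts with sti() as soon as the CPU drops its last lock, so by the time a device handler acknowledged the LAPIC, FL_IF could already be set again; the EOI then let the next keyboard or IDE interrupt in before the current one reached iret, and every nesting level pushed another trap frame onto the same kernel stack. A minimal sketch of the ordering fix, with hypothetical handler names (cli() and lapic_eoi() are the real xv6 routines; do_device_work() stands in for kbd_intr()/ide_intr()):

// Sketch only -- not the xv6 sources.
extern void cli(void), sti(void), lapic_eoi(void);
extern void do_device_work(void);   // stands in for kbd_intr()/ide_intr()

// Before: EOI sent while interrupts may already be on again.
void
handle_irq_unbounded(void)
{
  do_device_work();   // may acquire/release a lock; release() ends with sti()
  lapic_eoi();        // LAPIC can now deliver the next IRQ, and with FL_IF set
                      // it is taken right here, before the iret, pushing
                      // another trap frame onto the same kernel stack
}

// After this commit: clear FL_IF first, then acknowledge.
void
handle_irq_bounded(void)
{
  do_device_work();
  cli();              // no interrupt can be taken once FL_IF is clear
  lapic_eoi();        // a pending IRQ stays pending...
}                     // ...until iret restores the saved FL_IF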
Showing 16 changed files with 194 additions and 28 deletions.
Notes        +75   -0
bio.c         +7   -1
console.c    +21  -10
defs.h        +5   -0
fd.c          +6   -0
fs.c          +7   -1
ide.c         +2   -2
kalloc.c      +2   -1
main.c       +13   -5
mmu.h         +1   -0
pipe.c        +1   -1
proc.c       +14   -2
proc.h        +2   -2
spinlock.c   +16   -2
spinlock.h    +2   -1
trap.c       +20   -0
Notes
@@ -279,3 +279,78 @@ BUT now userfs doesn't do the final cat README

AND w/ cprintf("kbd overflow"), panic holding locks in scheduler
maybe also simultaneous panic("interrupt while holding a lock")

again (holding down x key):
  kbd overflow
  kbd oaaniicloowh
  olding locks in scheduler
  trap v 33 eip 100F5F c^CNext at t=32166285
  (0) [0x0010033e] 0008:0010033e (unk. ctxt): jmp .+0xfffffffe (0x0010033e) ; ebfe
  (1) [0x0010005c] 0008:0010005c (unk. ctxt): jmp .+0xfffffffe (0x0010005c) ; ebfe
  cpu0 panicked due to holding locks in scheduler
  cpu1 got panic("interrupt while holding a lock")
    again in lapic_write.
    while re-enabling an IRQ?

again:
  cpu 0 panic("holding locks in scheduler")
    but didn't trigger related panics earlier in scheduler or sched()
    of course the panic is right after release() and thus sti()
    so we may be seeing an interrupt that left locks held
  cpu 1 unknown panic
  why does it happen to both cpus at the same time?

again:
  cpu 0 panic("holding locks in scheduler")
    but trap() didn't see any held locks on return
  cpu 1 no apparent panic

again:
  cpu 0 panic: holding too many locks in scheduler
  cpu 1 panic: kbd_intr returned while holding a lock

again:
  cpu 0 panic: holding too man
    la 10d70c lr 10027b
    those don't seem to be locks...
    only place non-constant lock is used is sleep()'s 2nd arg
      maybe register not preserved across context switch?
      it's in %esi...
      sched() doesn't touch %esi
      %esi is evidently callee-saved
    something to do with interrupts? since ordinarily it works
  cpu 1 panic: kbd_int returned while holding a lock
    la 107340 lr 107300
    console_lock and kbd_lock
    maybe console_lock is often not released due to change
    in use_console_lock (panic on other cpu)

again:
  cpu 0: panic: h...
    la 10D78C lr 102CA0
  cpu 1: panic: acquire FL_IF (later than cpu 0)

but if sleep() were acquiring random locks, we'd see panics
in release, after sleep() returned.
actually when system is idle, maybe no-one sleeps at all.
just scheduler() and interrupts

questions:
  does userfs use pipes? or fork?
    no
  does anything bad happen if process 1 exits? eg exit() in cat.c
    looks ok
  are there really no processes left?
  lock_init() so we can have a magic number?

HMM maybe the variables at the end of struct cpu are being overwritten
  nlocks, lastacquire, lastrelease
  by cpu->stack?
  adding junk buffers maybe causes crash to take longer...
  when do we run on cpu stack?
    just in scheduler()?
    and interrupts from scheduler()

OH! recursive interrupts will use up any amount of cpu[].stack!
  underflow and wrecks *previous* cpu's struct
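The 0xdeadbeef guard words this commit plants around each CPU's mpstack (set in main.c, declared in proc.h, checked on every scheduler() iteration in proc.c) are a canary for exactly that overflow. A minimal sketch of the idea, using hypothetical names rather than the xv6 declarations:

// Canary sketch (illustrative struct, not xv6's): plant a known word on both
// sides of the per-CPU stack and have the scheduler check it, so a stack that
// runs out of bounds trips a panic instead of silently trashing the
// neighbouring CPU's bookkeeping fields.
#define GUARD 0xdeadbeef

void panic(char *s);

struct percpu {
  unsigned int guard1;      // canary on one side of the stack
  char stack[512];          // per-CPU stack used by scheduler() and interrupts
  unsigned int guard2;      // canary on the other side
  int nlock;                // the fields that were being overwritten
};

void
check_guards(struct percpu *c)
{
  if(c->guard1 != GUARD || c->guard2 != GUARD)
    panic("cpu guard");     // same panic string the commit adds to scheduler()
}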
bio.c
@@ -8,7 +8,13 @@
 #include "buf.h"
 
 struct buf buf[NBUF];
-struct spinlock buf_table_lock = { "buf_table" };
+struct spinlock buf_table_lock;
+
+void
+binit(void)
+{
+  initlock(&buf_table_lock, "buf_table");
+}
 
 struct buf *
 getblk()
console.c
@@ -3,11 +3,16 @@
 #include "defs.h"
 #include "spinlock.h"
 #include "dev.h"
 #include "param.h"
 
-struct spinlock console_lock = { "console" };
+struct spinlock console_lock;
 int panicked = 0;
 int use_console_lock = 0;
 
+// per-cpu copy of output to help panic/lock debugging
+char obuf[NCPU][1024];
+uint obufi[NCPU];
+
 /*
  * copy console output to parallel port, which you can tell
  * .bochsrc to copy to the stdout:
@@ -32,6 +37,10 @@ cons_putc(int c)
   ushort *crt = (ushort *) 0xB8000;  // base of CGA memory
   int ind;
 
+  obuf[rcr4()][obufi[rcr4()]++] = c;
+  if(obufi[rcr4()] >= 1024)
+    obufi[rcr4()] = 0;
+
   if(panicked){
     cli();
     for(;;)
@@ -101,11 +110,13 @@ printint(int xx, int base, int sgn)
 void
 cprintf(char *fmt, ...)
 {
-  int i, state = 0, c;
+  int i, state = 0, c, locking = 0;
   uint *ap = (uint *)(void *)&fmt + 1;
 
-  if(use_console_lock)
+  if(use_console_lock){
+    locking = 1;
     acquire(&console_lock);
+  }
 
   for(i = 0; fmt[i]; i++){
     c = fmt[i] & 0xff;
@@ -140,7 +151,7 @@ cprintf(char *fmt, ...)
     }
   }
 
-  if(use_console_lock)
+  if(locking)
     release(&console_lock);
 }
@@ -293,7 +304,7 @@ static uchar *charcode[4] = {
 char kbd_buf[KBD_BUF];
 int kbd_r;
 int kbd_w;
-struct spinlock kbd_lock = { "kbd_lock" };
+struct spinlock kbd_lock;
 
 void
 kbd_intr()
@@ -303,20 +314,17 @@ kbd_intr()
   st = inb(KBSTATP);
   if((st & KBS_DIB) == 0){
-    lapic_eoi();
     return;
   }
   data = inb(KBDATAP);
 
   if(data == 0xE0) {
     shift |= E0ESC;
-    lapic_eoi();
     return;
   } else if(data & 0x80) {
     // Key released
     data = (shift & E0ESC ? data : data & 0x7F);
     shift &= ~(shiftcode[data] | E0ESC);
-    lapic_eoi();
     return;
   } else if(shift & E0ESC) {
     // Last character was an E0 escape; or with 0x80
@@ -346,14 +354,17 @@ kbd_intr()
   }
 
   release(&kbd_lock);
-
-  lapic_eoi();
 }
 
 void
 console_init()
 {
+  initlock(&console_lock, "console");
+  initlock(&kbd_lock, "kbd");
+
   devsw[CONSOLE].d_write = console_write;
 
   ioapic_enable(IRQ_KBD, 1);
+
+  use_console_lock = 1;
 }
defs.h
@@ -10,6 +10,7 @@ void panic(char *s);
 void kbd_intr(void);
 
 // proc.c
+void pinit(void);
 struct proc;
 struct jmpbuf;
 void setupsegs(struct proc *);
@@ -67,6 +68,7 @@ void ioapic_enable (int irq, int cpu);
 // spinlock.c
 struct spinlock;
+void initlock(struct spinlock *, char *);
 void acquire(struct spinlock *);
 void release(struct spinlock *);
 int holding(struct spinlock *);
@@ -83,6 +85,7 @@ int pipe_write(struct pipe *p, char *addr, int n);
 int pipe_read(struct pipe *p, char *addr, int n);
 
 // fd.c
+void fd_init(void);
 int fd_ualloc(void);
 struct fd *fd_alloc(void);
 void fd_close(struct fd *);
@@ -97,6 +100,7 @@ void* ide_start_rw(int diskno, uint secno, void *dst, uint nsecs, int read);
 int ide_finish(void *);
 
 // bio.c
+void binit(void);
 struct buf;
 struct buf *getblk(void);
 struct buf *bread(uint, uint);
@@ -104,6 +108,7 @@ void bwrite(uint, struct buf *, uint);
 void brelse(struct buf *);
 
 // fs.c
+void iinit(void);
 struct inode *iget(uint dev, uint inum);
 void ilock(struct inode *ip);
 void iunlock(struct inode *ip);
fd.c
@@ -13,6 +13,12 @@ struct devsw devsw[NDEV];
 struct fd fds[NFD];
 
+void
+fd_init(void)
+{
+  initlock(&fd_table_lock, "fd_table");
+}
+
 /*
  * allocate a file descriptor number for curproc.
  */
fs.c
@@ -13,10 +13,16 @@
 // these are inodes currently in use
 // an entry is free if count == 0
 struct inode inode[NINODE];
-struct spinlock inode_table_lock = { "inode_table" };
+struct spinlock inode_table_lock;
 
 uint rootdev = 1;
 
+void
+iinit(void)
+{
+  initlock(&inode_table_lock, "inode_table");
+}
+
 static uint
 balloc(uint dev)
 {
ide.c
@@ -26,7 +26,7 @@ struct ide_request {
 };
 struct ide_request request[NREQUEST];
 int head, tail;
-struct spinlock ide_lock = { "ide" };
+struct spinlock ide_lock;
 
 int disk_channel;
@@ -46,6 +46,7 @@ ide_wait_ready(int check_error)
 void
 ide_init(void)
 {
+  initlock(&ide_lock, "ide");
   if(ncpu < 2){
     panic("ide_init: disk interrupt is going to the second cpu\n");
   }
@@ -61,7 +62,6 @@ ide_intr(void)
   // cprintf("cpu%d: ide_intr\n", cpu());
   wakeup(&request[tail]);
   release(&ide_lock);
-  lapic_eoi();
 }
 
 int
kalloc.c
@@ -15,7 +15,7 @@
 #include "proc.h"
 #include "spinlock.h"
 
-struct spinlock kalloc_lock = { "kalloc" };
+struct spinlock kalloc_lock;
 
 struct run {
   struct run *next;
@@ -37,6 +37,7 @@ kinit(void)
   uint mem;
   char *start;
 
+  initlock(&kalloc_lock, "kalloc");
   start = (char *) &end;
   start = (char *) (((uint)start + PAGE) & ~(PAGE-1));
   mem = 256; // XXX
main.c
@@ -15,8 +15,6 @@ extern uchar _binary_user1_start[], _binary_user1_size[];
 extern uchar _binary_usertests_start[], _binary_usertests_size[];
 extern uchar _binary_userfs_start[], _binary_userfs_size[];
 
-extern int use_console_lock;
-
 // CPU 0 starts running C code here.
 // This is called main0 not main so that it can have
 // a void return type.  Gcc can't handle functions named
@@ -27,28 +25,36 @@ main0(void)
   int i;
   struct proc *p;
 
+  lcr4(0); // xxx copy of cpu #
+
   // clear BSS
   memset(edata, 0, end - edata);
 
   // Make sure interrupts stay disabled on all processors
   // until each signals it is ready, by pretending to hold
   // an extra lock.
-  for(i = 0; i < NCPU; i++)
+  // xxx maybe replace w/ acquire remembering if FL_IF
+  for(i = 0; i < NCPU; i++){
     cpus[i].nlock++;
+    cpus[i].guard1 = 0xdeadbeef;
+    cpus[i].guard2 = 0xdeadbeef;
+  }
 
   mp_init(); // collect info about this machine
 
-  use_console_lock = 1;
-
   lapic_init(mp_bcpu());
   cprintf("\n\ncpu%d: booting xv6\n\n", cpu());
 
+  pinit();
+  binit();
   pic_init(); // initialize PIC
   ioapic_init();
   kinit(); // physical memory allocator
   tvinit(); // trap vectors
   idtinit(); // this CPU's idt register
+  fd_init();
+  iinit();
 
   // create a fake process per CPU
   // so each CPU always has a tss and a gdt
@@ -101,6 +107,8 @@ main0(void)
 void
 mpmain(void)
 {
+  lcr4(1); // xxx copy of cpu #
+
   cprintf("cpu%d: starting\n", cpu());
   idtinit(); // CPU's idt
   if(cpu() == 0)
mmu.h
@@ -178,6 +178,7 @@ struct gatedesc {
 // Set up a normal interrupt/trap gate descriptor.
 // - istrap: 1 for a trap (= exception) gate, 0 for an interrupt gate.
+//   interrupt gate clears FL_IF, trap gate leaves FL_IF alone
 // - sel: Code segment selector for interrupt/trap handler
 // - off: Offset in code segment for interrupt/trap handler
 // - dpl: Descriptor Privilege Level -
pipe.c
@@ -34,7 +34,7 @@ pipe_alloc(struct fd **fd1, struct fd **fd2)
   p->writeopen = 1;
   p->writep = 0;
   p->readp = 0;
-  memset(&p->lock, 0, sizeof(p->lock));
+  initlock(&p->lock, "pipe");
   (*fd1)->type = FD_PIPE;
   (*fd1)->readable = 1;
   (*fd1)->writeable = 0;
proc.c
@@ -7,7 +7,7 @@
 #include "defs.h"
 #include "spinlock.h"
 
-struct spinlock proc_table_lock = { "proc_table" };
+struct spinlock proc_table_lock;
 
 struct proc proc[NPROC];
 struct proc *curproc[NCPU];
@@ -15,6 +15,12 @@ int next_pid = NCPU;
 extern void forkret(void);
 extern void forkret1(struct trapframe*);
 
+void
+pinit(void)
+{
+  initlock(&proc_table_lock, "proc_table");
+}
+
 /*
  * set up a process's task state and segment descriptors
  * correctly, given its current size and address in memory.
@@ -146,6 +152,9 @@ scheduler(void)
     // Loop over process table looking for process to run.
     acquire(&proc_table_lock);
     for(i = 0; i < NPROC; i++){
+      if(cpus[cpu()].guard1 != 0xdeadbeef ||
+         cpus[cpu()].guard2 != 0xdeadbeef)
+        panic("cpu guard");
       p = &proc[i];
       if(p->state != RUNNABLE)
         continue;
@@ -194,6 +203,7 @@ scheduler(void)
       // XXX if not holding proc_table_lock panic.
     }
     release(&proc_table_lock);
+
     if(cpus[cpu()].nlock != 0)
@@ -212,7 +222,9 @@ scheduler(void)
 void
 sched(void)
 {
-  if(setjmp(&curproc[cpu()]->jmpbuf) == 0)
+  struct proc *p = curproc[cpu()];
+
+  if(setjmp(&p->jmpbuf) == 0)
     longjmp(&cpus[cpu()].jmpbuf);
 }
proc.h
@@ -41,8 +41,6 @@ struct proc{
   uint sz;                // total size of mem, including kernel stack
   char *kstack;           // kernel stack, separate from mem so it doesn't move
   enum proc_state state;
-  enum proc_state newstate; // desired state after swtch()
-  struct spinlock *mtx;   // mutex for condition variable
   int pid;
   int ppid;
   void *chan;             // sleep
@@ -68,7 +66,9 @@ extern struct proc *curproc[NCPU]; // can be NULL if no proc running.
 struct cpu {
   uchar apicid;           // Local APIC ID
   struct jmpbuf jmpbuf;
+  int guard1;
   char mpstack[MPSTACK];  // per-cpu start-up stack
+  int guard2;
   volatile int booted;
   int nlock;              // # of locks currently held
   struct spinlock *lastacquire; // xxx debug
spinlock.c
@@ -10,9 +10,20 @@
 // because cprintf uses them itself.
 //#define cprintf dont_use_cprintf
 
+#define LOCKMAGIC 0x6673ffea
+
 extern int use_console_lock;
 
+void
+initlock(struct spinlock *lock, char *name)
+{
+  lock->magic = LOCKMAGIC;
+  lock->name = name;
+  lock->locked = 0;
+  lock->cpu = 0xffffffff;
+}
+
 void
 getcallerpcs(void *v, uint pcs[])
 {
   uint *ebp = (uint *)v - 2;
@@ -27,6 +38,8 @@ getcallerpcs(void *v, uint pcs[])
 void
 acquire(struct spinlock * lock)
 {
+  if(lock->magic != LOCKMAGIC)
+    panic("weird lock magic");
   if(holding(lock))
     panic("acquire");
@@ -45,6 +58,9 @@ acquire(struct spinlock * lock)
 void
 release(struct spinlock * lock)
 {
+  if(lock->magic != LOCKMAGIC)
+    panic("weird lock magic");
+
   if(!holding(lock))
     panic("release");
@@ -55,8 +71,6 @@ release(struct spinlock * lock)
   lock->locked = 0;
   if(--cpus[cpu()].nlock == 0)
     sti();
-  // xxx we may have just turned interrupts on during
-  // an interrupt, is that ok?
 }
 
 int
spinlock.h
 struct spinlock {
+  uint magic;
   char *name;
   uint locked;
-  uint pcs[10];
   int cpu;
+  uint pcs[10];
 };
trap.c
@@ -41,6 +41,17 @@ trap(struct trapframe *tf)
     panic("interrupt while holding a lock");
   }
 
+  if(cpu() == 1 && curproc[cpu()] == 0){
+    if(&tf < cpus[cpu()].mpstack || &tf > cpus[cpu()].mpstack + 512){
+      cprintf("&tf %x mpstack %x\n", &tf, cpus[cpu()].mpstack);
+      panic("trap cpu stack");
+    }
+  } else if(curproc[cpu()]){
+    if(&tf < curproc[cpu()]->kstack){
+      panic("trap kstack");
+    }
+  }
+
   if(v == T_SYSCALL){
     struct proc *cp = curproc[cpu()];
     int num = cp->tf->eax;
@@ -97,11 +108,20 @@ trap(struct trapframe *tf)
   if(v == (IRQ_OFFSET + IRQ_IDE)){
     ide_intr();
+    if(cpus[cpu()].nlock)
+      panic("ide_intr returned while holding a lock");
+    cli(); // prevent a waiting interrupt from overflowing stack
+    lapic_eoi();
     return;
   }
   if(v == (IRQ_OFFSET + IRQ_KBD)){
     kbd_intr();
+    if(cpus[cpu()].nlock){
+      panic("kbd_intr returned while holding a lock");
+    }
+    cli(); // prevent a waiting interrupt from overflowing stack
+    lapic_eoi();
     return;
   }