Commit 0ca1c040, authored by Frans Kaashoek
bootasm.S
@@ -12,8 +12,8 @@
 start:
   cli                         # BIOS enabled interrupts; disable

-  # Set up the important data segment registers (DS, ES, SS).
-  xorw    %ax,%ax             # Segment number zero
+  # Zero data segment registers DS, ES, and SS.
+  xorw    %ax,%ax             # Set %ax to zero
   movw    %ax,%ds             # -> Data Segment
   movw    %ax,%es             # -> Extra Segment
   movw    %ax,%ss             # -> Stack Segment
@@ -37,7 +37,7 @@ seta20.2:
   outb    %al,$0x60

   # Switch from real to protected mode.  Use a bootstrap GDT that makes
-  # virtual addresses map dierctly to physical addresses so that the
+  # virtual addresses map directly to physical addresses so that the
   # effective memory map doesn't change during the transition.
   lgdt    gdtdesc
   movl    %cr0, %eax
console.c
@@ -53,7 +53,7 @@ printint(int xx, int base, int sign)
 void
 cprintf(char *fmt, ...)
 {
-  int i, c, state, locking;
+  int i, c, locking;
   uint *argp;
   char *s;
@@ -65,7 +65,6 @@ cprintf(char *fmt, ...)
     panic("null fmt");

   argp = (uint*)(void*)(&fmt + 1);
-  state = 0;
   for(i = 0; (c = fmt[i] & 0xff) != 0; i++){
     if(c != '%'){
       consputc(c);
defs.h
@@ -62,11 +62,10 @@ extern uchar ioapicid;
 void            ioapicinit(void);

 // kalloc.c
-char*           enter_alloc(void);
 char*           kalloc(void);
 void            kfree(char*);
-void            kinit(void);
-uint            detect_memory(void);
+void            kinit1(void*, void*);
+void            kinit2(void*, void*);

 // kbd.c
 void            kbdintr(void);
@@ -165,7 +164,7 @@ void            uartputc(int);
 void            seginit(void);
 void            kvmalloc(void);
 void            vmenable(void);
-pde_t*          setupkvm(char* (*alloc)());
+pde_t*          setupkvm();
 char*           uva2ka(pde_t*, char*);
 int             allocuvm(pde_t*, uint, uint);
 int             deallocuvm(pde_t*, uint, uint);
entry.S
@@ -36,7 +36,7 @@ multiboot_header:
 .globl _start
 _start = V2P_WO(entry)

-# Entering xv6 on boot processor. Machine is mostly set up.
+# Entering xv6 on boot processor, with paging off.
 .globl entry
 entry:
   # Turn on page size extension for 4Mbyte pages
kalloc.c
@@ -9,42 +9,45 @@
 #include "mmu.h"
 #include "spinlock.h"

+void freerange(void *vstart, void *vend);
+extern char end[]; // first address after kernel loaded from ELF file
+
 struct run {
   struct run *next;
 };

 struct {
   struct spinlock lock;
+  int use_lock;
   struct run *freelist;
 } kmem;

-extern char end[]; // first address after kernel loaded from ELF file
-static char *newend;
-
-// A simple page allocator to get off the ground during entry
-char *
-enter_alloc(void)
+// Initialization happens in two phases.
+// 1. main() calls kinit1() while still using entrypgdir to place just
+// the pages mapped by entrypgdir on free list.
+// 2. main() calls kinit2() with the rest of the physical pages
+// after installing a full page table that maps them on all cores.
+void
+kinit1(void *vstart, void *vend)
 {
-  if (newend == 0)
-    newend = end;
-
-  if ((uint) newend >= KERNBASE + 0x400000)
-    panic("only first 4Mbyte are mapped during entry");
-
-  void *p = (void*)PGROUNDUP((uint)newend);
-  memset(p, 0, PGSIZE);
-  newend = newend + PGSIZE;
-  return p;
+  initlock(&kmem.lock, "kmem");
+  kmem.use_lock = 0;
+  freerange(vstart, vend);
+}
+
+void
+kinit2(void *vstart, void *vend)
+{
+  freerange(vstart, vend);
+  kmem.use_lock = 1;
 }

-// Initialize free list of physical pages.
 void
-kinit(void)
+freerange(void *vstart, void *vend)
 {
   char *p;
-
-  initlock(&kmem.lock, "kmem");
-  p = (char*)PGROUNDUP((uint)newend);
-  for(; p + PGSIZE <= (char*)p2v(PHYSTOP); p += PGSIZE)
+  p = (char*)PGROUNDUP((uint)vstart);
+  for(; p + PGSIZE <= (char*)vend; p += PGSIZE)
     kfree(p);
 }
@@ -64,11 +67,13 @@ kfree(char *v)
   // Fill with junk to catch dangling refs.
   memset(v, 1, PGSIZE);

-  acquire(&kmem.lock);
+  if(kmem.use_lock)
+    acquire(&kmem.lock);
   r = (struct run*)v;
   r->next = kmem.freelist;
   kmem.freelist = r;
-  release(&kmem.lock);
+  if(kmem.use_lock)
+    release(&kmem.lock);
 }

 // Allocate one 4096-byte page of physical memory.
@@ -79,11 +84,13 @@ kalloc(void)
 {
   struct run *r;

-  acquire(&kmem.lock);
+  if(kmem.use_lock)
+    acquire(&kmem.lock);
   r = kmem.freelist;
   if(r)
     kmem.freelist = r->next;
-  release(&kmem.lock);
+  if(kmem.use_lock)
+    release(&kmem.lock);
   return (char*)r;
 }
main.c
@@ -9,6 +9,7 @@
 static void startothers(void);
 static void mpmain(void)  __attribute__((noreturn));
 extern pde_t *kpgdir;
+extern char end[]; // first address after kernel loaded from ELF file

 // Bootstrap processor starts running C code here.
 // Allocate a real stack and switch to it, first
@@ -16,6 +17,7 @@ extern pde_t *kpgdir;
 int
 main(void)
 {
+  kinit1(end, P2V(4*1024*1024)); // phys page allocator
   kvmalloc();      // kernel page table
   mpinit();        // collect info about this machine
   lapicinit(mpbcpu());
@@ -33,9 +35,9 @@ main(void)
   ideinit();       // disk
   if(!ismp)
     timerinit();   // uniprocessor timer
-  startothers();   // start other processors (must come before kinit)
-  kinit();         // initialize memory allocator
-  userinit();      // first user process (must come after kinit)
+  startothers();   // start other processors
+  kinit2(P2V(4*1024*1024), P2V(PHYSTOP)); // must come after startothers()
+  userinit();      // first user process
   // Finish setting up this processor in mpmain.
   mpmain();
 }
@@ -84,12 +86,7 @@ startothers(void)
    // Tell entryother.S what stack to use, where to enter, and what
    // pgdir to use. We cannot use kpgdir yet, because the AP processor
    // is running in low memory, so we use entrypgdir for the APs too.
-    // kalloc can return addresses above 4Mbyte (the machine may have
-    // much more physical memory than 4Mbyte), which aren't mapped by
-    // entrypgdir, so we must allocate a stack using enter_alloc();
-    // this introduces the constraint that xv6 cannot use kalloc until
-    // after these last enter_alloc invocations.
-    stack = enter_alloc();
+    stack = kalloc();
    *(void**)(code-4) = stack + KSTACKSIZE;
    *(void**)(code-8) = mpenter;
    *(int**)(code-12) = (void *) v2p(entrypgdir);
@@ -109,9 +106,9 @@ startothers(void)
 __attribute__((__aligned__(PGSIZE)))
 pde_t entrypgdir[NPDENTRIES] = {
   // Map VA's [0, 4MB) to PA's [0, 4MB)
-  [0] = (0) + PTE_P + PTE_W + PTE_PS,
+  [0] = (0) | PTE_P | PTE_W | PTE_PS,
   // Map VA's [KERNBASE, KERNBASE+4MB) to PA's [0, 4MB)
-  [KERNBASE>>PDXSHIFT] = (0) + PTE_P + PTE_W + PTE_PS,
+  [KERNBASE>>PDXSHIFT] = (0) | PTE_P | PTE_W | PTE_PS,
 };

 //PAGEBREAK!
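For reference, the two entrypgdir slots above map virtual address 0 and virtual address KERNBASE onto the same first 4MB of physical memory, which is why kinit1() in main() is handed exactly end..P2V(4*1024*1024). A quick host-side check of the directory indices; the constants assume xv6's memlayout.h/mmu.h values (KERNBASE 0x80000000, PDXSHIFT 22), so verify them against your tree.

/* Print the page-directory slots that entrypgdir fills and the span of a
 * PTE_PS (4MB superpage) entry. */
#include <stdio.h>

#define KERNBASE 0x80000000u   /* assumed from memlayout.h */
#define PDXSHIFT 22            /* assumed from mmu.h: top 10 bits index the PD */

int main(void) {
  printf("PDE index for VA 0x00000000: %u\n", 0u >> PDXSHIFT);        /* 0   */
  printf("PDE index for VA KERNBASE:   %u\n", KERNBASE >> PDXSHIFT);  /* 512 */
  printf("bytes per PTE_PS entry:      %u MB\n", (1u << PDXSHIFT) >> 20); /* 4 */
  return 0;
}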
memlayout.h
@@ -10,13 +10,13 @@
 #ifndef __ASSEMBLER__

-static inline uint v2p(void *a) { return (uint) a - KERNBASE; }
-static inline void *p2v(uint a) { return (void *) a + KERNBASE; }
+static inline uint v2p(void *a) { return ((uint) (a)) - KERNBASE; }
+static inline void *p2v(uint a) { return (void *) ((a) + KERNBASE); }

 #endif

-#define V2P(a) ((uint) a - KERNBASE)
-#define P2V(a) ((void *) a + KERNBASE)
+#define V2P(a) (((uint) (a)) - KERNBASE)
+#define P2V(a) (((void *) (a)) + KERNBASE)

 #define V2P_WO(x) ((x) - KERNBASE)    // same as V2P, but without casts
 #define P2V_WO(x) ((x) + KERNBASE)    // same as V2P, but without casts
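The extra parentheses in V2P/P2V matter because the macros are now handed expressions such as 4*1024*1024 (see the kinit1/kinit2 calls in main.c above). A small host-side demo of the difference; OLD_P2V and NEW_P2V are illustrative stand-ins for the removed and added definitions, and KERNBASE is hard-coded for the demo (a 64-bit host compiler may also warn about the int-to-pointer cast, which is harmless here; in the 32-bit kernel the sizes match).

/* Show why the macro argument must be parenthesized before the cast. */
#include <stdio.h>

#define KERNBASE   0x80000000u
#define OLD_P2V(a) ((void *) a + KERNBASE)       /* removed form: cast grabs only the first token */
#define NEW_P2V(a) (((void *) (a)) + KERNBASE)   /* added form: argument fully parenthesized */

int main(void) {
  void *p = NEW_P2V(4*1024*1024);   /* expands to ((void *)(4*1024*1024)) + KERNBASE */
  printf("%p\n", p);
  /* void *q = OLD_P2V(4*1024*1024);   would expand to (void *)4 *1024*1024 + KERNBASE:
     the cast binds to the 4 alone, and multiplying a pointer does not compile. */
  return 0;
}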
runoff
@@ -223,11 +223,17 @@ awk '
 grep Pages: all.ps

 # if we have the nice font, use it
-nicefont=../LucidaSans-Typewriter83
-if [ -f $nicefont ]
+nicefont=LucidaSans-Typewriter83
+if [ ! -f ../$nicefont ]
+then
+    if git cat-file blob font:$nicefont > ../$nicefont~; then
+        mv ../$nicefont~ ../$nicefont
+    fi
+fi
+if [ -f ../$nicefont ]
 then
     echo nicefont
-    (sed 1q all.ps; cat $nicefont; sed '1d; s/Courier/LucidaSans-Typewriter83/' all.ps) >allf.ps
+    (sed 1q all.ps; cat ../$nicefont; sed "1d; s/Courier/$nicefont/" all.ps) >allf.ps
 else
     echo ugly font!
     cp all.ps allf.ps
vm.c
@@ -43,7 +43,7 @@ seginit(void)
 // that corresponds to virtual address va.  If alloc!=0,
 // create any required page table pages.
 static pte_t *
-walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
+walkpgdir(pde_t *pgdir, const void *va, int alloc)
 {
   pde_t *pde;
   pte_t *pgtab;
@@ -52,7 +52,7 @@ walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
   if(*pde & PTE_P){
     pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
   } else {
-    if(!alloc || (pgtab = (pte_t*)alloc()) == 0)
+    if(!alloc || (pgtab = (pte_t*)kalloc()) == 0)
       return 0;
     // Make sure all those PTE_P bits are zero.
     memset(pgtab, 0, PGSIZE);
@@ -68,8 +68,7 @@ walkpgdir(pde_t *pgdir, const void *va, char* (*alloc)(void))
 // physical addresses starting at pa. va and size might not
 // be page-aligned.
 static int
-mappages(pde_t *pgdir, void *va, uint size, uint pa,
-         int perm, char* (*alloc)(void))
+mappages(pde_t *pgdir, void *va, uint size, uint pa, int perm)
 {
   char *a, *last;
   pte_t *pte;
@@ -77,7 +76,7 @@ mappages(pde_t *pgdir, void *va, uint size, uint pa,
   a = (char*)PGROUNDDOWN((uint)va);
   last = (char*)PGROUNDDOWN(((uint)va) + size - 1);
   for(;;){
-    if((pte = walkpgdir(pgdir, a, alloc)) == 0)
+    if((pte = walkpgdir(pgdir, a, 1)) == 0)
       return -1;
     if(*pte & PTE_P)
       panic("remap");
@@ -90,53 +89,56 @@ mappages(pde_t *pgdir, void *va, uint size, uint pa,
   return 0;
 }

-// The mappings from logical to virtual are one to one (i.e.,
-// segmentation doesn't do anything). There is one page table per
-// process, plus one that's used when a CPU is not running any process
-// (kpgdir). A user process uses the same page table as the kernel; the
-// page protection bits prevent it from accessing kernel memory.
+// There is one page table per process, plus one that's used when
+// a CPU is not running any process (kpgdir). The kernel uses the
+// current process's page table during system calls and interrupts;
+// page protection bits prevent user code from using the kernel's
+// mappings.
 //
 // setupkvm() and exec() set up every page table like this:
-//   0..KERNBASE: user memory (text+data+stack+heap), mapped to some free
-//                phys memory
+//
+//   0..KERNBASE: user memory (text+data+stack+heap), mapped to
+//                phys memory allocated by the kernel
 //   KERNBASE..KERNBASE+EXTMEM: mapped to 0..EXTMEM (for I/O space)
-//   KERNBASE+EXTMEM..KERNBASE+end: mapped to EXTMEM..end kernel,
-//                w. no write permission
-//   KERNBASE+end..KERBASE+PHYSTOP: mapped to end..PHYSTOP,
-//                rw data + free memory
+//   KERNBASE+EXTMEM..data: mapped to EXTMEM..V2P(data)
+//                for the kernel's instructions and r/o data
+//   data..KERNBASE+PHYSTOP: mapped to V2P(data)..PHYSTOP,
+//                rw data + free physical memory
 //   0xfe000000..0: mapped direct (devices such as ioapic)
 //
-// The kernel allocates memory for its heap and for user memory
-// between KERNBASE+end and the end of physical memory (PHYSTOP).
-// The user program sits in the bottom of the address space, and the
-// kernel at the top at KERNBASE.
+// The kernel allocates physical memory for its heap and for user memory
+// between V2P(end) and the end of physical memory (PHYSTOP)
+// (directly addressable from end..P2V(PHYSTOP)).
+
+// This table defines the kernel's mappings, which are present in
+// every process's page table.
 static struct kmap {
   void *virt;
   uint phys_start;
   uint phys_end;
   int perm;
 } kmap[] = {
- { P2V(0),          0,             1024*1024, PTE_W},  // I/O space
- { (void*)KERNLINK, V2P(KERNLINK), V2P(data), 0},      // kernel text+rodata
- { data,            V2P(data),     PHYSTOP,   PTE_W},  // kernel data, memory
- { (void*)DEVSPACE, DEVSPACE,      0,         PTE_W},  // more devices
+ { (void*)KERNBASE, 0,             EXTMEM,    PTE_W},  // I/O space
+ { (void*)KERNLINK, V2P(KERNLINK), V2P(data), 0},      // kernel text+rodata
+ { (void*)data,     V2P(data),     PHYSTOP,   PTE_W},  // kernel data, memory
+ { (void*)DEVSPACE, DEVSPACE,      0,         PTE_W},  // more devices
 };

 // Set up kernel part of a page table.
 pde_t*
-setupkvm(char* (*alloc)(void))
+setupkvm()
 {
   pde_t *pgdir;
   struct kmap *k;

-  if((pgdir = (pde_t*)alloc()) == 0)
+  if((pgdir = (pde_t*)kalloc()) == 0)
     return 0;
   memset(pgdir, 0, PGSIZE);
   if (p2v(PHYSTOP) > (void*)DEVSPACE)
     panic("PHYSTOP too high");
   for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
     if(mappages(pgdir, k->virt, k->phys_end - k->phys_start,
-                (uint)k->phys_start, k->perm, alloc) < 0)
+                (uint)k->phys_start, k->perm) < 0)
       return 0;
   return pgdir;
 }
@@ -146,7 +148,7 @@ setupkvm(char* (*alloc)(void))
 void
 kvmalloc(void)
 {
-  kpgdir = setupkvm(enter_alloc);
+  kpgdir = setupkvm();
   switchkvm();
 }
@@ -185,7 +187,7 @@ inituvm(pde_t *pgdir, char *init, uint sz)
     panic("inituvm: more than a page");
   mem = kalloc();
   memset(mem, 0, PGSIZE);
-  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
+  mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W|PTE_U);
   memmove(mem, init, sz);
 }
@@ -235,7 +237,7 @@ allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
       return 0;
     }
     memset(mem, 0, PGSIZE);
-    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc);
+    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U);
   }
   return newsz;
 }
@@ -312,7 +314,7 @@ copyuvm(pde_t *pgdir, uint sz)
   uint pa, i;
   char *mem;

-  if((d = setupkvm(kalloc)) == 0)
+  if((d = setupkvm()) == 0)
     return 0;
   for(i = 0; i < sz; i += PGSIZE){
     if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
@@ -323,7 +325,7 @@ copyuvm(pde_t *pgdir, uint sz)
     if((mem = kalloc()) == 0)
       goto bad;
     memmove(mem, (char*)p2v(pa), PGSIZE);
-    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U, kalloc) < 0)
+    if(mappages(d, (void*)i, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0)
       goto bad;
   }
   return d;
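With walkpgdir() now taking an int alloc flag instead of an allocator function, callers pass 1 to let it kalloc() any missing page-table pages (as mappages() does) and 0 to only look up an existing mapping (as copyuvm() does). A hypothetical vm.c-style helper, not part of this commit, assuming xv6's pte_t, PTE_P, and PTE_ADDR definitions, just to illustrate the lookup-only case:

// Return the physical address that va maps to in pgdir, or 0 if there is
// no present mapping (0 doubles as "no mapping" here; fine for a sketch).
static uint
lookup_pa(pde_t *pgdir, const void *va)
{
  pte_t *pte;

  pte = walkpgdir(pgdir, va, 0);    // alloc == 0: never allocates page-table pages
  if(pte == 0 || !(*pte & PTE_P))
    return 0;                       // no page table page, or page not present
  return PTE_ADDR(*pte) | ((uint)va & 0xFFF);  // frame address plus offset in page
}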