Commit 554dd487 authored by Nickolai Zeldovich

merge

@@ -72,7 +72,7 @@ AS = $(TOOLPREFIX)gas
 LD = $(TOOLPREFIX)ld
 OBJCOPY = $(TOOLPREFIX)objcopy
 OBJDUMP = $(TOOLPREFIX)objdump
-CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror
+CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -std=c99
 CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
 ASFLAGS = -m32 -gdwarf-2
 # FreeBSD ld wants ``elf_i386_fbsd''
...
@@ -7,6 +7,7 @@ struct proc;
 struct spinlock;
 struct condvar;
 struct stat;
+struct vmnode;
 // bio.c
 void binit(void);
@@ -166,15 +167,20 @@ void kvmalloc(void);
 void vmenable(void);
 pde_t* setupkvm(void);
 char* uva2ka(pde_t*, char*);
-int allocuvm(pde_t*, uint, uint);
-int deallocuvm(pde_t*, uint, uint);
+struct vmnode * vmn_allocpg(uint);
+void vmn_free(struct vmnode *);
+int vmn_load(struct vmnode *, struct inode*, uint, uint);
+struct vmap * vmap_alloc(void);
+void vmap_free(struct vmap *);
+int vmap_insert(struct vmap *, struct vmnode *n, uint);
+struct vma * vmap_lookup(struct vmap *, uint);
+struct vmap * vmap_copy(struct vmap *);
 void freevm(pde_t*);
-void inituvm(pde_t*, char*, uint);
-int loaduvm(pde_t*, char*, struct inode*, uint, uint);
-pde_t* copyuvm(pde_t*, uint);
 void switchuvm(struct proc*);
 void switchkvm(void);
-int copyout(pde_t*, uint, void*, uint);
+int copyout(struct vmap *, uint, void*, uint);
+int copyin(struct vmap *, uint, void*, uint);
+int pagefault(pde_t*, struct vmap *, uint);
 // number of elements in fixed-size array
 #define NELEM(x) (sizeof(x)/sizeof((x)[0]))
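
The hunk above swaps the old allocuvm/deallocuvm/loaduvm/copyuvm interface for a vmnode/vmap API. As a quick orientation, here is a minimal usage sketch built only from these declarations; the helper itself is hypothetical and not part of the commit, and its error handling mirrors the goto-style cleanup exec() uses below:

    // Hypothetical sketch: back a page-aligned region with zeroed pages,
    // publish it in a vmap, and optionally copy initial bytes into it.
    static int
    map_zeroed_region(struct vmap *vmap, uint va, uint npages, void *init, uint initlen)
    {
      struct vmnode *vmn;

      if((vmn = vmn_allocpg(npages)) == 0)   // allocate zeroed backing pages
        return -1;
      if(vmap_insert(vmap, vmn, va) < 0){    // map [va, va + npages*PGSIZE)
        vmn_free(vmn);                       // insert failed: vmap took no reference
        return -1;
      }
      if(init && copyout(vmap, va, init, initlen) < 0)
        return -1;                           // caller cleans up via vmap_free()
      return 0;
    }

userinit() and exec() below follow exactly this allocate/insert/copyout pattern.
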
@@ -13,12 +13,14 @@ int
 exec(char *path, char **argv)
 {
   char *s, *last;
-  int i, off;
-  uint argc, sz, sp, ustack[3+MAXARG+1];
+  int i, off, brk = 0;
+  uint argc, sp, ustack[3+MAXARG+1];
   struct elfhdr elf;
-  struct inode *ip;
+  struct inode *ip = 0;
   struct proghdr ph;
-  pde_t *pgdir, *oldpgdir;
+  pde_t *pgdir = 0, *oldpgdir;
+  struct vmap *vmap = 0, *oldvmap;
+  struct vmnode *vmn = 0;
   if((ip = namei(path)) == 0)
     return -1;
@@ -34,8 +36,10 @@ exec(char *path, char **argv)
   if((pgdir = setupkvm()) == 0)
     goto bad;
+  if((vmap = vmap_alloc()) == 0)
+    goto bad;
   // Load program into memory.
-  sz = 0;
   for(i=0, off=elf.phoff; i<elf.phnum; i++, off+=sizeof(ph)){
     if(readi(ip, (char*)&ph, off, sizeof(ph)) != sizeof(ph))
       goto bad;
@@ -43,27 +47,51 @@ exec(char *path, char **argv)
       continue;
     if(ph.memsz < ph.filesz)
       goto bad;
-    if((sz = allocuvm(pgdir, sz, ph.va + ph.memsz)) == 0)
+    if(ph.va % PGSIZE) {
+      cprintf("unaligned ph.va\n");
+      goto bad;
+    }
+    uint va_start = PGROUNDDOWN(ph.va);
+    uint va_end = PGROUNDUP(ph.va + ph.memsz);
+    if(va_end > brk)
+      brk = va_end;
+    int npg = (va_end - va_start) / PGSIZE;
+    if ((vmn = vmn_allocpg(npg)) == 0)
+      goto bad;
+    if(vmn_load(vmn, ip, ph.offset, ph.filesz) < 0)
       goto bad;
-    if(loaduvm(pgdir, (char*)ph.va, ip, ph.offset, ph.filesz) < 0)
+    if(vmap_insert(vmap, vmn, ph.va) < 0)
       goto bad;
+    vmn = 0;
   }
   iunlockput(ip);
   ip = 0;
-  // Allocate a one-page stack at the next page boundary
-  sz = PGROUNDUP(sz);
-  if((sz = allocuvm(pgdir, sz, sz + PGSIZE)) == 0)
+  // Allocate a vmnode for the heap.
+  // XXX pre-allocate 32 pages..
+  if((vmn = vmn_allocpg(32)) == 0)
+    goto bad;
+  if(vmap_insert(vmap, vmn, brk) < 0)
+    goto bad;
+  vmn = 0;
+  // Allocate a one-page stack at the top of the (user) address space
+  if((vmn = vmn_allocpg(1)) == 0)
+    goto bad;
+  if(vmap_insert(vmap, vmn, USERTOP-PGSIZE) < 0)
    goto bad;
+  vmn = 0;
   // Push argument strings, prepare rest of stack in ustack.
-  sp = sz;
+  sp = USERTOP;
   for(argc = 0; argv[argc]; argc++) {
     if(argc >= MAXARG)
       goto bad;
     sp -= strlen(argv[argc]) + 1;
     sp &= ~3;
-    if(copyout(pgdir, sp, argv[argc], strlen(argv[argc]) + 1) < 0)
+    if(copyout(vmap, sp, argv[argc], strlen(argv[argc]) + 1) < 0)
       goto bad;
     ustack[3+argc] = sp;
   }
@@ -74,7 +102,7 @@ exec(char *path, char **argv)
   ustack[2] = sp - (argc+1)*4;  // argv pointer
   sp -= (3+argc+1) * 4;
-  if(copyout(pgdir, sp, ustack, (3+argc+1)*4) < 0)
+  if(copyout(vmap, sp, ustack, (3+argc+1)*4) < 0)
     goto bad;
   // Save program name for debugging.
@@ -85,12 +113,16 @@ exec(char *path, char **argv)
   // Commit to the user image.
   oldpgdir = proc->pgdir;
+  oldvmap = proc->vmap;
   proc->pgdir = pgdir;
-  proc->sz = sz;
+  proc->vmap = vmap;
+  proc->brk = brk + 4;  // XXX so that brk-1 points within heap vma..
   proc->tf->eip = elf.entry;  // main
   proc->tf->esp = sp;
   switchuvm(proc);
   freevm(oldpgdir);
+  vmap_free(oldvmap);
   return 0;
 bad:
@@ -99,5 +131,9 @@ exec(char *path, char **argv)
     freevm(pgdir);
   if(ip)
     iunlockput(ip);
+  if(vmap)
+    vmap_free(vmap);
+  if(vmn)
+    vmn_free(vmn);
   return -1;
 }
@@ -90,6 +90,7 @@ kalloc(void)
   release(&kmem->lock);
   if (r == 0)
     cprintf("%d: kalloc out\n", cpunum());
+  memset(r, 2, PGSIZE);
   return (char*)r;
 }
@@ -35,7 +35,7 @@ jmpkstack(void)
   if(kstack == 0)
     panic("jmpkstack kalloc");
   top = kstack + PGSIZE;
-  asm volatile("movl %0,%%esp; call mainc" : : "r" (top));
+  __asm volatile("movl %0,%%esp; call mainc" : : "r" (top));
   panic("jmpkstack");
 }
...
@@ -122,7 +122,7 @@ struct segdesc {
 #define PDXSHIFT 22 // offset of PDX in a linear address
 #define PGROUNDUP(sz)  (((sz)+PGSIZE-1) & ~(PGSIZE-1))
-#define PGROUNDDOWN(a) ((char*)((((unsigned int)(a)) & ~(PGSIZE-1))))
+#define PGROUNDDOWN(a) ((__typeof__(a))((((unsigned int)(a)) & ~(PGSIZE-1))))
 // Page table/directory entry flags.
 #define PTE_P 0x001 // Present
...
@@ -145,8 +145,15 @@ userinit(void)
   initproc = p;
   if((p->pgdir = setupkvm()) == 0)
     panic("userinit: out of memory?");
-  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
-  p->sz = PGSIZE;
+  if((p->vmap = vmap_alloc()) == 0)
+    panic("userinit: out of vmaps?");
+  struct vmnode *vmn = vmn_allocpg(PGROUNDUP((int)_binary_initcode_size) / PGSIZE);
+  if(vmn == 0)
+    panic("userinit: vmn_allocpg");
+  if(vmap_insert(p->vmap, vmn, 0) < 0)
+    panic("userinit: vmap_insert");
+  if(copyout(p->vmap, 0, _binary_initcode_start, (int)_binary_initcode_size) < 0)
+    panic("userinit: copyout");
   memset(p->tf, 0, sizeof(*p->tf));
   p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
   p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
@@ -166,17 +173,22 @@ userinit(void)
 int
 growproc(int n)
 {
-  uint sz;
-  sz = proc->sz;
-  if(n > 0){
-    if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
+  uint brk = proc->brk;
+  uint nbrk = brk + n;
+  struct vma *vma = vmap_lookup(proc->vmap, brk-1);
+  if(vma == 0)
     return -1;
-  } else if(n < 0){
-    if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
+  if(nbrk > vma->va_end){
+    /* XXX */
+    release(&vma->lock);
+    cprintf("cannot resize heap: %d -> %d\n", brk, nbrk);
    return -1;
   }
-  proc->sz = sz;
+  proc->brk = brk + n;
+  release(&vma->lock);
   switchuvm(proc);
   return 0;
 }
@@ -194,14 +206,22 @@ fork(void)
   if((np = allocproc()) == 0)
     return -1;
+  if((np->pgdir = setupkvm()) == 0){
+    kfree(np->kstack);
+    np->kstack = 0;
+    np->state = UNUSED;
+    return -1;
+  }
   // Copy process state from p.
-  if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
+  if((np->vmap = vmap_copy(proc->vmap)) == 0){
+    freevm(np->pgdir);
     kfree(np->kstack);
     np->kstack = 0;
     np->state = UNUSED;
     return -1;
   }
-  np->sz = proc->sz;
+  np->brk = proc->brk;
   np->parent = proc;
   *np->tf = *proc->tf;
@@ -294,6 +314,7 @@ wait(void)
         kfree(p->kstack);
         p->kstack = 0;
         freevm(p->pgdir);
+        vmap_free(p->vmap);
         p->state = UNUSED;
         p->pid = 0;
         p->parent = 0;
...
#include "spinlock.h"
// Segments in proc->gdt. // Segments in proc->gdt.
// Also known to bootasm.S and trapasm.S // Also known to bootasm.S and trapasm.S
#define SEG_KCODE 1 // kernel code #define SEG_KCODE 1 // kernel code
...@@ -8,7 +10,6 @@ ...@@ -8,7 +10,6 @@
#define SEG_TSS 6 // this process's task state #define SEG_TSS 6 // this process's task state
#define NSEGS 7 #define NSEGS 7
//PAGEBREAK: 17 //PAGEBREAK: 17
// Saved registers for kernel context switches. // Saved registers for kernel context switches.
// Don't need to save all the segment registers (%cs, etc), // Don't need to save all the segment registers (%cs, etc),
...@@ -30,10 +31,32 @@ struct context { ...@@ -30,10 +31,32 @@ struct context {
enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE }; enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
// Virtual memory
struct vmnode {
uint npages;
char *page[32];
uint ref;
uint alloc;
};
struct vma {
uint va_start; // start of mapping
uint va_end; // one past the last byte
struct vmnode *n;
struct spinlock lock; // serialize fault/unmap
};
struct vmap {
struct vma e[16];
struct spinlock lock; // serialize map/lookup/unmap
uint alloc;
};
// Per-process state // Per-process state
struct proc { struct proc {
uint sz; // Size of process memory (bytes) struct vmap *vmap; // va -> vma
pde_t* pgdir; // Page table pde_t* pgdir; // Page table
uint brk; // Top of heap
char *kstack; // Bottom of kernel stack for this process char *kstack; // Bottom of kernel stack for this process
enum procstate state; // Process state enum procstate state; // Process state
volatile int pid; // Process ID volatile int pid; // Process ID
...@@ -110,8 +133,8 @@ extern int ncpu; ...@@ -110,8 +133,8 @@ extern int ncpu;
// holding those two variables in the local cpu's struct cpu. // holding those two variables in the local cpu's struct cpu.
// This is similar to how thread-local variables are implemented // This is similar to how thread-local variables are implemented
// in thread libraries such as Linux pthreads. // in thread libraries such as Linux pthreads.
extern struct cpu *cpu asm("%gs:0"); // &cpus[cpunum()] extern struct cpu *cpu __asm("%gs:0"); // &cpus[cpunum()]
extern struct proc *proc asm("%gs:4"); // cpus[cpunum()].proc extern struct proc *proc __asm("%gs:4"); // cpus[cpunum()].proc
extern struct ptable *ptable asm("%gs:8"); // &ptables[cpunum()] extern struct ptable *ptable __asm("%gs:8"); // &ptables[cpunum()]
extern struct kmem *kmem asm("%gs:12"); // &kmems[cpunum()] extern struct kmem *kmem __asm("%gs:12"); // &kmems[cpunum()]
extern struct runq *runq asm("%gs:16"); // &runqs[cpunum()] extern struct runq *runq __asm("%gs:16"); // &runqs[cpunum()]
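
These three structures now carry the whole user address space: a proc owns one vmap; each occupied vma slot maps [va_start, va_end) onto a reference-counted vmnode of up to 32 physical pages; and vmap_copy() (in the vm.c hunk further down) deep-copies every vmnode on fork. A small hypothetical debugging helper, not part of the commit, shows how the pieces are meant to be traversed:

    // Hypothetical helper (not in this commit): print every mapping in a vmap.
    // Takes m->lock the same way vmap_lookup() does, but does not lock the
    // individual vma entries, so it is only a debugging aid.
    void
    vmap_dump(struct vmap *m)
    {
      acquire(&m->lock);
      for(uint i = 0; i < NELEM(m->e); i++){
        struct vma *e = &m->e[i];
        if(e->n == 0)
          continue;               // unused slot
        cprintf("vma %d: va %x..%x, %d pages, ref %d\n",
                i, e->va_start, e->va_end, e->n->npages, e->n->ref);
      }
      release(&m->lock);
    }
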
+#pragma once
 // Mutual exclusion lock.
 struct spinlock {
   uint locked;       // Is the lock held?
...
@@ -19,8 +19,10 @@
 int
 fetchint(struct proc *p, uint addr, int *ip)
 {
+#if 0 /* XXX use pagefault() */
   if(addr >= p->sz || addr+4 > p->sz)
     return -1;
+#endif
   *ip = *(int*)(addr);
   return 0;
 }
@@ -33,10 +35,16 @@ fetchstr(struct proc *p, uint addr, char **pp)
 {
   char *s, *ep;
+#if 0 /* XXX use pagefault() */
   if(addr >= p->sz)
     return -1;
+#endif
   *pp = (char*)addr;
+#if 0 /* XXX use pagefault() */
   ep = (char*)p->sz;
+#else
+  ep = (char *) 0xffffffff;
+#endif
   for(s = *pp; s < ep; s++)
     if(*s == 0)
       return s - *pp;
@@ -60,8 +68,10 @@ argptr(int n, char **pp, int size)
   if(argint(n, &i) < 0)
     return -1;
+#if 0 /* XXX use pagefault() */
   if((uint)i >= proc->sz || (uint)i+size > proc->sz)
     return -1;
+#endif
   *pp = (char*)i;
   return 0;
 }
...
@@ -20,3 +20,5 @@
 #define SYS_sbrk 19
 #define SYS_sleep 20
 #define SYS_uptime 21
+#define SYS_map 22
+#define SYS_unmap 23
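
The commit reserves syscall numbers for map and unmap but adds no handlers yet. Purely as an illustration of where this is headed, a plausible sys_map() could be built from the vmap API introduced above; everything in this sketch (the handler, its argument order, the page-alignment policy) is an assumption, not code from this commit:

    // Hypothetical sketch of a sys_map() handler; NOT part of this commit.
    int
    sys_map(void)
    {
      int addr, len;
      struct vmnode *vmn;

      if(argint(0, &addr) < 0 || argint(1, &len) < 0)
        return -1;
      if(addr % PGSIZE || len <= 0)
        return -1;
      if((vmn = vmn_allocpg(PGROUNDUP(len) / PGSIZE)) == 0)
        return -1;
      if(vmap_insert(proc->vmap, vmn, addr) < 0){
        vmn_free(vmn);               // insert failed: nothing holds a reference
        return -1;
      }
      return addr;                   // page-table entries get installed lazily by pagefault()
    }
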
@@ -51,7 +51,7 @@ sys_sbrk(void)
   if(argint(0, &n) < 0)
     return -1;
-  addr = proc->sz;
+  addr = proc->brk;
   if(growproc(n) < 0)
     return -1;
   return addr;
...
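
Because exec() now pre-allocates a fixed 32-page heap vmnode at brk, sbrk() only moves proc->brk inside that one vma, and growproc() rejects anything that would run past vma->va_end. From a user program's point of view this looks roughly like the following hypothetical session (numbers assume PGSIZE is 4096):

    // Hypothetical user-level session illustrating the new sbrk() limits.
    char *a = sbrk(0);          // current proc->brk, just above the loaded segments
    char *b = sbrk(2*4096);     // ok: brk moves up 2 pages, still inside the 32-page heap vma
    char *c = sbrk(32*4096);    // fails, returns (char*)-1: would cross vma->va_end;
                                // growproc() prints "cannot resize heap: ..."
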
@@ -47,6 +47,13 @@ trap(struct trapframe *tf)
     return;
   }
+  if(tf->trapno == T_PGFLT){
+    if(pagefault(proc->pgdir, proc->vmap, rcr2()) >= 0){
+      switchuvm(proc);
+      return;
+    }
+  }
   switch(tf->trapno){
   case T_IRQ0 + IRQ_TIMER:
     if(cpu->id == 0){
...
@@ -1379,7 +1379,7 @@ void
 validateint(int *p)
 {
   int res;
-  asm("mov %%esp, %%ebx\n\t"
+  __asm("mov %%esp, %%ebx\n\t"
       "mov %3, %%esp\n\t"
       "int %2\n\t"
      "mov %%ebx, %%esp" :
...
@@ -191,97 +191,182 @@ switchuvm(struct proc *p)
   popcli();
 }
-// Load the initcode into address 0 of pgdir.
-// sz must be less than a page.
+struct {
+  struct vmnode n[1024];
+} vmnodes;
+struct {
+  struct vmap m[128];
+} vmaps;
+struct vmnode *
+vmn_alloc(void)
+{
+  for(uint i = 0; i < sizeof(vmnodes.n) / sizeof(vmnodes.n[0]); i++) {
+    struct vmnode *n = &vmnodes.n[i];
+    if(n->alloc == 0 && __sync_bool_compare_and_swap(&n->alloc, 0, 1)) {
+      n->npages = 0;
+      n->ref = 0;
+      return n;
+    }
+  }
+  panic("out of vmnodes");
+}
+struct vmnode *
+vmn_allocpg(uint npg)
+{
+  struct vmnode *n = vmn_alloc();
+  if(npg > sizeof(n->page) / sizeof(n->page[0])) {
+    cprintf("vmnode too big: %d\n", npg);
+    return 0;
+  }
+  for(uint i = 0; i < npg; i++) {
+    if((n->page[i] = kalloc()) == 0) {
+      vmn_free(n);
+      return 0;
+    }
+    memset((char *) n->page[i], 0, PGSIZE);
+    n->npages++;
+  }
+  return n;
+}
 void
-inituvm(pde_t *pgdir, char *init, uint sz)
+vmn_free(struct vmnode *n)
 {
-  char *mem;
-  if(sz >= PGSIZE)
-    panic("inituvm: more than a page");
-  mem = kalloc();
-  memset(mem, 0, PGSIZE);
-  mappages(pgdir, 0, PGSIZE, PADDR(mem), PTE_W|PTE_U);
-  memmove(mem, init, sz);
+  for(uint i = 0; i < n->npages; i++) {
+    kfree((char *) n->page[i]);
+    n->page[i] = 0;
+  }
+  n->alloc = 0;
 }
-// Load a program segment into pgdir. addr must be page-aligned
-// and the pages from addr to addr+sz must already be mapped.
-int
-loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
+void
+vmn_decref(struct vmnode *n)
 {
-  uint i, pa, n;
-  pte_t *pte;
-  if((uint)addr % PGSIZE != 0)
-    panic("loaduvm: addr must be page aligned");
-  for(i = 0; i < sz; i += PGSIZE){
-    if((pte = walkpgdir(pgdir, addr+i, 0)) == 0)
-      panic("loaduvm: address should exist");
-    pa = PTE_ADDR(*pte);
-    if(sz - i < PGSIZE)
-      n = sz - i;
-    else
-      n = PGSIZE;
-    if(readi(ip, (char*)pa, offset+i, n) != n)
+  if(__sync_sub_and_fetch(&n->ref, 1) == 0)
+    vmn_free(n);
+}
+struct vmnode *
+vmn_copy(struct vmnode *n)
+{
+  struct vmnode *c = vmn_allocpg(n->npages);
+  if(c != 0)
+    for(uint i = 0; i < n->npages; i++)
+      memmove(c->page[i], n->page[i], PGSIZE);
+  return c;
+}
+struct vmap *
+vmap_alloc(void)
+{
+  for(uint i = 0; i < sizeof(vmaps.m) / sizeof(vmaps.m[0]); i++) {
+    struct vmap *m = &vmaps.m[i];
+    if(m->alloc == 0 && __sync_bool_compare_and_swap(&m->alloc, 0, 1)) {
+      for(uint j = 0; j < sizeof(m->e) / sizeof(m->e[0]); j++)
+        m->e[j].n = 0;
+      return m;
+    }
+  }
+  panic("out of vmaps");
+}
+void
+vmap_free(struct vmap *m)
+{
+  for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++)
+    if(m->e[i].n)
+      vmn_decref(m->e[i].n);
+  m->alloc = 0;
+}
+int
+vmap_insert(struct vmap *m, struct vmnode *n, uint va_start)
+{
+  acquire(&m->lock);
+  uint va_end = va_start + n->npages * PGSIZE;
+  for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
+    if(m->e[i].n && (m->e[i].va_start < va_end && m->e[i].va_end > va_start)) {
+      release(&m->lock);
+      cprintf("vmap_insert: overlap\n");
       return -1;
     }
+  }
+  for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
+    if(m->e[i].n)
+      continue;
+    __sync_fetch_and_add(&n->ref, 1);
+    m->e[i].va_start = va_start;
+    m->e[i].va_end = va_end;
+    m->e[i].n = n;
+    release(&m->lock);
     return 0;
+  }
+  release(&m->lock);
+  cprintf("vmap_insert: out of vma slots\n");
+  return -1;
 }
-// Allocate page tables and physical memory to grow process from oldsz to
-// newsz, which need not be page aligned. Returns new size or 0 on error.
-int
-allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
+struct vma *
+vmap_lookup(struct vmap *m, uint va)
 {
-  char *mem;
-  uint a;
-  if(newsz > USERTOP)
+  acquire(&m->lock);
+  for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
+    struct vma *e = &m->e[i];
+    if (va >= e->va_start && va < e->va_end) {
+      acquire(&e->lock);
+      release(&m->lock);
+      return e;
+    }
+  }
+  release(&m->lock);
+  return 0;
+}
+struct vmap *
+vmap_copy(struct vmap *m)
+{
+  struct vmap *c = vmap_alloc();
+  if(c == 0)
     return 0;
-  if(newsz < oldsz)
-    return oldsz;
-  a = PGROUNDUP(oldsz);
-  for(; a < newsz; a += PGSIZE){
-    mem = kalloc();
-    if(mem == 0){
-      cprintf("allocuvm out of memory\n");
-      deallocuvm(pgdir, newsz, oldsz);
+  acquire(&m->lock);
+  for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
+    if(m->e[i].n == 0)
+      continue;
+    c->e[i].va_start = m->e[i].va_start;
+    c->e[i].va_end = m->e[i].va_end;
+    c->e[i].n = vmn_copy(m->e[i].n);
+    if(c->e[i].n == 0) {
+      release(&m->lock);
+      vmap_free(c);
      return 0;
    }
-    memset(mem, 0, PGSIZE);
-    mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);
+    __sync_fetch_and_add(&c->e[i].n->ref, 1);
   }
-  return newsz;
+  release(&m->lock);
+  return c;
 }
-// Deallocate user pages to bring the process size from oldsz to
-// newsz. oldsz and newsz need not be page-aligned, nor does newsz
-// need to be less than oldsz. oldsz can be larger than the actual
-// process size. Returns the new process size.
+// Load a program segment into a vmnode.
 int
-deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
+vmn_load(struct vmnode *vmn, struct inode *ip, uint offset, uint sz)
 {
-  pte_t *pte;
-  uint a, pa;
-  if(newsz >= oldsz)
-    return oldsz;
-  a = PGROUNDUP(newsz);
-  for(; a < oldsz; a += PGSIZE){
-    pte = walkpgdir(pgdir, (char*)a, 0);
-    if(pte && (*pte & PTE_P) != 0){
-      pa = PTE_ADDR(*pte);
-      if(pa == 0)
-        panic("kfree");
-      kfree((char*)pa);
-      *pte = 0;
-    }
+  for(uint i = 0; i < sz; i += PGSIZE){
+    uint n;
+    char *p = vmn->page[i / PGSIZE];
+    if(sz - i < PGSIZE)
+      n = sz - i;
+    else
+      n = PGSIZE;
+    if(readi(ip, p, offset+i, n) != n)
+      return -1;
   }
-  return newsz;
+  return 0;
 }
 // Free a page table and all the physical memory pages
@@ -293,7 +378,6 @@ freevm(pde_t *pgdir)
   if(pgdir == 0)
     panic("freevm: no pgdir");
-  deallocuvm(pgdir, USERTOP, 0);
   for(i = 0; i < NPDENTRIES; i++){
     if(pgdir[i] & PTE_P)
       kfree((char*)PTE_ADDR(pgdir[i]));
@@ -301,37 +385,6 @@ freevm(pde_t *pgdir)
   kfree((char*)pgdir);
 }
-// Given a parent process's page table, create a copy
-// of it for a child.
-pde_t*
-copyuvm(pde_t *pgdir, uint sz)
-{
-  pde_t *d;
-  pte_t *pte;
-  uint pa, i;
-  char *mem;
-  if((d = setupkvm()) == 0)
-    return 0;
-  for(i = 0; i < sz; i += PGSIZE){
-    if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0)
-      panic("copyuvm: pte should exist");
-    if(!(*pte & PTE_P))
-      panic("copyuvm: page not present");
-    pa = PTE_ADDR(*pte);
-    if((mem = kalloc()) == 0)
-      goto bad;
-    memmove(mem, (char*)pa, PGSIZE);
-    if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0)
-      goto bad;
-  }
-  return d;
-bad:
-  freevm(d);
-  return 0;
-}
 //PAGEBREAK!
 // Map user virtual address to kernel physical address.
 char*
@@ -347,28 +400,73 @@ uva2ka(pde_t *pgdir, char *uva)
   return (char*)PTE_ADDR(*pte);
 }
-// Copy len bytes from p to user address va in page table pgdir.
-// Most useful when pgdir is not the current page table.
-// uva2ka ensures this only works for PTE_U pages.
+// Copy len bytes from p to user address va in vmap.
+// Most useful when vmap is not the current page table.
 int
-copyout(pde_t *pgdir, uint va, void *p, uint len)
+copyout(struct vmap *vmap, uint va, void *p, uint len)
 {
-  char *buf, *pa0;
-  uint n, va0;
-  buf = (char*)p;
+  char *buf = (char*)p;
+  while(len > 0){
+    uint va0 = (uint)PGROUNDDOWN(va);
+    struct vma *vma = vmap_lookup(vmap, va);
+    if(vma == 0)
+      return -1;
+    uint pn = (va0 - vma->va_start) / PGSIZE;
+    char *p0 = vma->n->page[pn];
+    if(p0 == 0)
+      panic("copyout: missing page");
+    uint n = PGSIZE - (va - va0);
+    if(n > len)
+      n = len;
+    memmove(p0 + (va - va0), buf, n);
+    len -= n;
+    buf += n;
+    va = va0 + PGSIZE;
+    release(&vma->lock);
+  }
+  return 0;
+}
+int
+copyin(struct vmap *vmap, uint va, void *p, uint len)
+{
+  char *buf = (char*)p;
   while(len > 0){
-    va0 = (uint)PGROUNDDOWN(va);
-    pa0 = uva2ka(pgdir, (char*)va0);
-    if(pa0 == 0)
+    uint va0 = (uint)PGROUNDDOWN(va);
+    struct vma *vma = vmap_lookup(vmap, va);
+    if(vma == 0)
      return -1;
-    n = PGSIZE - (va - va0);
+    uint pn = (va0 - vma->va_start) / PGSIZE;
+    char *p0 = vma->n->page[pn];
+    if(p0 == 0)
+      panic("copyout: missing page");
+    uint n = PGSIZE - (va - va0);
    if(n > len)
      n = len;
-    memmove(pa0 + (va - va0), buf, n);
+    memmove(buf, p0 + (va - va0), n);
    len -= n;
    buf += n;
    va = va0 + PGSIZE;
+    release(&vma->lock);
  }
  return 0;
 }
+int
+pagefault(pde_t *pgdir, struct vmap *vmap, uint va)
+{
+  pte_t *pte = walkpgdir(pgdir, (const void *)va, 1);
+  if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W))
+    return 0;
+  struct vma *m = vmap_lookup(vmap, va);
+  if(m == 0)
+    return -1;
+  uint npg = (PGROUNDDOWN(va) - m->va_start) / PGSIZE;
+  *pte = PADDR(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
+  release(&m->lock);
+  return 1;
+}
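
copyin() above gives the kernel a way to read user memory through the vmap rather than by dereferencing user pointers directly, which is what the disabled #if 0 checks in syscall.c are waiting for. A hypothetical follow-up, not in this commit, could rewrite fetchint() on top of it:

    // Hypothetical sketch (not in this commit): fetchint() via copyin(),
    // so a bad address returns -1 instead of being dereferenced blindly.
    int
    fetchint(struct proc *p, uint addr, int *ip)
    {
      if(copyin(p->vmap, addr, ip, sizeof(*ip)) < 0)
        return -1;
      return 0;
    }
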
@@ -5,14 +5,14 @@ inb(ushort port)
 {
   uchar data;
-  asm volatile("in %1,%0" : "=a" (data) : "d" (port));
+  __asm volatile("in %1,%0" : "=a" (data) : "d" (port));
   return data;
 }
 static inline void
 insl(int port, void *addr, int cnt)
 {
-  asm volatile("cld; rep insl" :
+  __asm volatile("cld; rep insl" :
                "=D" (addr), "=c" (cnt) :
                "d" (port), "0" (addr), "1" (cnt) :
                "memory", "cc");
@@ -21,19 +21,19 @@ insl(int port, void *addr, int cnt)
 static inline void
 outb(ushort port, uchar data)
 {
-  asm volatile("out %0,%1" : : "a" (data), "d" (port));
+  __asm volatile("out %0,%1" : : "a" (data), "d" (port));
 }
 static inline void
 outw(ushort port, ushort data)
 {
-  asm volatile("out %0,%1" : : "a" (data), "d" (port));
+  __asm volatile("out %0,%1" : : "a" (data), "d" (port));
 }
 static inline void
 outsl(int port, const void *addr, int cnt)
 {
-  asm volatile("cld; rep outsl" :
+  __asm volatile("cld; rep outsl" :
                "=S" (addr), "=c" (cnt) :
                "d" (port), "0" (addr), "1" (cnt) :
                "cc");
@@ -42,7 +42,7 @@ outsl(int port, const void *addr, int cnt)
 static inline void
 stosb(void *addr, int data, int cnt)
 {
-  asm volatile("cld; rep stosb" :
+  __asm volatile("cld; rep stosb" :
               "=D" (addr), "=c" (cnt) :
               "0" (addr), "1" (cnt), "a" (data) :
               "memory", "cc");
@@ -59,7 +59,7 @@ lgdt(struct segdesc *p, int size)
   pd[1] = (uint)p;
   pd[2] = (uint)p >> 16;
-  asm volatile("lgdt (%0)" : : "r" (pd));
+  __asm volatile("lgdt (%0)" : : "r" (pd));
 }
 struct gatedesc;
@@ -73,34 +73,34 @@ lidt(struct gatedesc *p, int size)
   pd[1] = (uint)p;
   pd[2] = (uint)p >> 16;
-  asm volatile("lidt (%0)" : : "r" (pd));
+  __asm volatile("lidt (%0)" : : "r" (pd));
 }
 static inline void
 ltr(ushort sel)
 {
-  asm volatile("ltr %0" : : "r" (sel));
+  __asm volatile("ltr %0" : : "r" (sel));
 }
 static inline uint
 readeflags(void)
 {
   uint eflags;
-  asm volatile("pushfl; popl %0" : "=r" (eflags));
+  __asm volatile("pushfl; popl %0" : "=r" (eflags));
   return eflags;
 }
 static inline void
 loadgs(ushort v)
 {
-  asm volatile("movw %0, %%gs" : : "r" (v));
+  __asm volatile("movw %0, %%gs" : : "r" (v));
 }
 static inline uint
 rebp(void)
 {
   uint val;
-  asm volatile("movl %%ebp,%0" : "=r" (val));
+  __asm volatile("movl %%ebp,%0" : "=r" (val));
   return val;
 }
@@ -108,20 +108,20 @@ static inline uint
 resp(void)
 {
   uint val;
-  asm volatile("movl %%esp,%0" : "=r" (val));
+  __asm volatile("movl %%esp,%0" : "=r" (val));
   return val;
 }
 static inline void
 cli(void)
 {
-  asm volatile("cli");
+  __asm volatile("cli");
 }
 static inline void
 sti(void)
 {
-  asm volatile("sti");
+  __asm volatile("sti");
 }
 static inline uint
@@ -130,7 +130,7 @@ xchg(volatile uint *addr, uint newval)
   uint result;
   // The + in "+m" denotes a read-modify-write operand.
-  asm volatile("lock; xchgl %0, %1" :
+  __asm volatile("lock; xchgl %0, %1" :
                "+m" (*addr), "=a" (result) :
                "1" (newval) :
                "cc");
@@ -141,14 +141,14 @@ xchg(volatile uint *addr, uint newval)
 static inline void
 lcr0(uint val)
 {
-  asm volatile("movl %0,%%cr0" : : "r" (val));
+  __asm volatile("movl %0,%%cr0" : : "r" (val));
 }
 static inline uint
 rcr0(void)
 {
   uint val;
-  asm volatile("movl %%cr0,%0" : "=r" (val));
+  __asm volatile("movl %%cr0,%0" : "=r" (val));
   return val;
 }
@@ -156,21 +156,21 @@ static inline uint
 rcr2(void)
 {
   uint val;
-  asm volatile("movl %%cr2,%0" : "=r" (val));
+  __asm volatile("movl %%cr2,%0" : "=r" (val));
   return val;
 }
 static inline void
 lcr3(uint val)
 {
-  asm volatile("movl %0,%%cr3" : : "r" (val));
+  __asm volatile("movl %0,%%cr3" : : "r" (val));
 }
 static inline uint
 rcr3(void)
 {
   uint val;
-  asm volatile("movl %%cr3,%0" : "=r" (val));
+  __asm volatile("movl %%cr3,%0" : "=r" (val));
   return val;
 }
...