提交 554dd487 创建 作者: Nickolai Zeldovich's avatar Nickolai Zeldovich

merge

......@@ -72,7 +72,7 @@ AS = $(TOOLPREFIX)gas
LD = $(TOOLPREFIX)ld
OBJCOPY = $(TOOLPREFIX)objcopy
OBJDUMP = $(TOOLPREFIX)objdump
CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror
CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -std=c99
CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
ASFLAGS = -m32 -gdwarf-2
# FreeBSD ld wants ``elf_i386_fbsd''
......
......@@ -7,6 +7,7 @@ struct proc;
struct spinlock;
struct condvar;
struct stat;
struct vmnode;
// bio.c
void binit(void);
......@@ -166,15 +167,20 @@ void kvmalloc(void);
void vmenable(void);
pde_t* setupkvm(void);
char* uva2ka(pde_t*, char*);
int allocuvm(pde_t*, uint, uint);
int deallocuvm(pde_t*, uint, uint);
struct vmnode * vmn_allocpg(uint);
void vmn_free(struct vmnode *);
int vmn_load(struct vmnode *, struct inode*, uint, uint);
struct vmap * vmap_alloc(void);
void vmap_free(struct vmap *);
int vmap_insert(struct vmap *, struct vmnode *n, uint);
struct vma * vmap_lookup(struct vmap *, uint);
struct vmap * vmap_copy(struct vmap *);
void freevm(pde_t*);
void inituvm(pde_t*, char*, uint);
int loaduvm(pde_t*, char*, struct inode*, uint, uint);
pde_t* copyuvm(pde_t*, uint);
void switchuvm(struct proc*);
void switchkvm(void);
int copyout(pde_t*, uint, void*, uint);
int copyout(struct vmap *, uint, void*, uint);
int copyin(struct vmap *, uint, void*, uint);
int pagefault(pde_t*, struct vmap *, uint);
// number of elements in fixed-size array
#define NELEM(x) (sizeof(x)/sizeof((x)[0]))
......@@ -13,12 +13,14 @@ int
exec(char *path, char **argv)
{
char *s, *last;
int i, off;
uint argc, sz, sp, ustack[3+MAXARG+1];
int i, off, brk = 0;
uint argc, sp, ustack[3+MAXARG+1];
struct elfhdr elf;
struct inode *ip;
struct inode *ip = 0;
struct proghdr ph;
pde_t *pgdir, *oldpgdir;
pde_t *pgdir = 0, *oldpgdir;
struct vmap *vmap = 0, *oldvmap;
struct vmnode *vmn = 0;
if((ip = namei(path)) == 0)
return -1;
......@@ -34,8 +36,10 @@ exec(char *path, char **argv)
if((pgdir = setupkvm()) == 0)
goto bad;
if((vmap = vmap_alloc()) == 0)
goto bad;
// Load program into memory.
sz = 0;
for(i=0, off=elf.phoff; i<elf.phnum; i++, off+=sizeof(ph)){
if(readi(ip, (char*)&ph, off, sizeof(ph)) != sizeof(ph))
goto bad;
......@@ -43,27 +47,51 @@ exec(char *path, char **argv)
continue;
if(ph.memsz < ph.filesz)
goto bad;
if((sz = allocuvm(pgdir, sz, ph.va + ph.memsz)) == 0)
if(ph.va % PGSIZE) {
cprintf("unaligned ph.va\n");
goto bad;
}
uint va_start = PGROUNDDOWN(ph.va);
uint va_end = PGROUNDUP(ph.va + ph.memsz);
if(va_end > brk)
brk = va_end;
int npg = (va_end - va_start) / PGSIZE;
if ((vmn = vmn_allocpg(npg)) == 0)
goto bad;
if(vmn_load(vmn, ip, ph.offset, ph.filesz) < 0)
goto bad;
if(loaduvm(pgdir, (char*)ph.va, ip, ph.offset, ph.filesz) < 0)
if(vmap_insert(vmap, vmn, ph.va) < 0)
goto bad;
vmn = 0;
}
iunlockput(ip);
ip = 0;
// Allocate a one-page stack at the next page boundary
sz = PGROUNDUP(sz);
if((sz = allocuvm(pgdir, sz, sz + PGSIZE)) == 0)
// Allocate a vmnode for the heap.
// XXX pre-allocate 32 pages..
if((vmn = vmn_allocpg(32)) == 0)
goto bad;
if(vmap_insert(vmap, vmn, brk) < 0)
goto bad;
vmn = 0;
// Allocate a one-page stack at the top of the (user) address space
if((vmn = vmn_allocpg(1)) == 0)
goto bad;
if(vmap_insert(vmap, vmn, USERTOP-PGSIZE) < 0)
goto bad;
vmn = 0;
// Push argument strings, prepare rest of stack in ustack.
sp = sz;
sp = USERTOP;
for(argc = 0; argv[argc]; argc++) {
if(argc >= MAXARG)
goto bad;
sp -= strlen(argv[argc]) + 1;
sp &= ~3;
if(copyout(pgdir, sp, argv[argc], strlen(argv[argc]) + 1) < 0)
if(copyout(vmap, sp, argv[argc], strlen(argv[argc]) + 1) < 0)
goto bad;
ustack[3+argc] = sp;
}
......@@ -74,7 +102,7 @@ exec(char *path, char **argv)
ustack[2] = sp - (argc+1)*4; // argv pointer
sp -= (3+argc+1) * 4;
if(copyout(pgdir, sp, ustack, (3+argc+1)*4) < 0)
if(copyout(vmap, sp, ustack, (3+argc+1)*4) < 0)
goto bad;
// Save program name for debugging.
......@@ -85,12 +113,16 @@ exec(char *path, char **argv)
// Commit to the user image.
oldpgdir = proc->pgdir;
oldvmap = proc->vmap;
proc->pgdir = pgdir;
proc->sz = sz;
proc->vmap = vmap;
proc->brk = brk + 4; // XXX so that brk-1 points within heap vma..
proc->tf->eip = elf.entry; // main
proc->tf->esp = sp;
switchuvm(proc);
freevm(oldpgdir);
vmap_free(oldvmap);
return 0;
bad:
......@@ -99,5 +131,9 @@ exec(char *path, char **argv)
freevm(pgdir);
if(ip)
iunlockput(ip);
if(vmap)
vmap_free(vmap);
if(vmn)
vmn_free(vmn);
return -1;
}
......@@ -90,6 +90,7 @@ kalloc(void)
release(&kmem->lock);
if (r == 0)
cprintf("%d: kalloc out\n", cpunum());
memset(r, 2, PGSIZE);
return (char*)r;
}
......@@ -35,7 +35,7 @@ jmpkstack(void)
if(kstack == 0)
panic("jmpkstack kalloc");
top = kstack + PGSIZE;
asm volatile("movl %0,%%esp; call mainc" : : "r" (top));
__asm volatile("movl %0,%%esp; call mainc" : : "r" (top));
panic("jmpkstack");
}
......
......@@ -122,7 +122,7 @@ struct segdesc {
#define PDXSHIFT 22 // offset of PDX in a linear address
#define PGROUNDUP(sz) (((sz)+PGSIZE-1) & ~(PGSIZE-1))
#define PGROUNDDOWN(a) ((char*)((((unsigned int)(a)) & ~(PGSIZE-1))))
#define PGROUNDDOWN(a) ((__typeof__(a))((((unsigned int)(a)) & ~(PGSIZE-1))))
// Page table/directory entry flags.
#define PTE_P 0x001 // Present
......
......@@ -145,8 +145,15 @@ userinit(void)
initproc = p;
if((p->pgdir = setupkvm()) == 0)
panic("userinit: out of memory?");
inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
p->sz = PGSIZE;
if((p->vmap = vmap_alloc()) == 0)
panic("userinit: out of vmaps?");
struct vmnode *vmn = vmn_allocpg(PGROUNDUP((int)_binary_initcode_size) / PGSIZE);
if(vmn == 0)
panic("userinit: vmn_allocpg");
if(vmap_insert(p->vmap, vmn, 0) < 0)
panic("userinit: vmap_insert");
if(copyout(p->vmap, 0, _binary_initcode_start, (int)_binary_initcode_size) < 0)
panic("userinit: copyout");
memset(p->tf, 0, sizeof(*p->tf));
p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
......@@ -166,17 +173,22 @@ userinit(void)
int
growproc(int n)
{
uint sz;
sz = proc->sz;
if(n > 0){
if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
return -1;
} else if(n < 0){
if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
return -1;
uint brk = proc->brk;
uint nbrk = brk + n;
struct vma *vma = vmap_lookup(proc->vmap, brk-1);
if(vma == 0)
return -1;
if(nbrk > vma->va_end){
/* XXX */
release(&vma->lock);
cprintf("cannot resize heap: %d -> %d\n", brk, nbrk);
return -1;
}
proc->sz = sz;
proc->brk = brk + n;
release(&vma->lock);
switchuvm(proc);
return 0;
}
......@@ -194,14 +206,22 @@ fork(void)
if((np = allocproc()) == 0)
return -1;
if((np->pgdir = setupkvm()) == 0){
kfree(np->kstack);
np->kstack = 0;
np->state = UNUSED;
return -1;
}
// Copy process state from p.
if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
if((np->vmap = vmap_copy(proc->vmap)) == 0){
freevm(np->pgdir);
kfree(np->kstack);
np->kstack = 0;
np->state = UNUSED;
return -1;
}
np->sz = proc->sz;
np->brk = proc->brk;
np->parent = proc;
*np->tf = *proc->tf;
......@@ -294,6 +314,7 @@ wait(void)
kfree(p->kstack);
p->kstack = 0;
freevm(p->pgdir);
vmap_free(p->vmap);
p->state = UNUSED;
p->pid = 0;
p->parent = 0;
......
#include "spinlock.h"
// Segments in proc->gdt.
// Also known to bootasm.S and trapasm.S
#define SEG_KCODE 1 // kernel code
......@@ -8,7 +10,6 @@
#define SEG_TSS 6 // this process's task state
#define NSEGS 7
//PAGEBREAK: 17
// Saved registers for kernel context switches.
// Don't need to save all the segment registers (%cs, etc),
......@@ -30,10 +31,32 @@ struct context {
enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
// Virtual memory
// A vmnode is a set of physical pages backing one mapping; it is
// shared between address spaces via the 'ref' count (see vmn_decref).
struct vmnode {
uint npages;          // number of valid entries in page[]
char *page[32];       // kernel virtual addresses of the backing pages
uint ref;             // reference count; freed when it drops to 0
uint alloc;           // slot-in-use flag for the static vmnodes table
};
// One contiguous virtual-address mapping backed by a vmnode.
struct vma {
uint va_start; // start of mapping
uint va_end; // one past the last byte
struct vmnode *n;     // backing pages; 0 marks an unused slot
struct spinlock lock; // serialize fault/unmap
};
// Per-process address-space map: a fixed table of vmas.
struct vmap {
struct vma e[16];     // mappings; entry unused when e[i].n == 0
struct spinlock lock; // serialize map/lookup/unmap
uint alloc;           // slot-in-use flag for the static vmaps table
};
// Per-process state
struct proc {
uint sz; // Size of process memory (bytes)
struct vmap *vmap; // va -> vma
pde_t* pgdir; // Page table
uint brk; // Top of heap
char *kstack; // Bottom of kernel stack for this process
enum procstate state; // Process state
volatile int pid; // Process ID
......@@ -110,8 +133,8 @@ extern int ncpu;
// holding those two variables in the local cpu's struct cpu.
// This is similar to how thread-local variables are implemented
// in thread libraries such as Linux pthreads.
extern struct cpu *cpu asm("%gs:0"); // &cpus[cpunum()]
extern struct proc *proc asm("%gs:4"); // cpus[cpunum()].proc
extern struct ptable *ptable asm("%gs:8"); // &ptables[cpunum()]
extern struct kmem *kmem asm("%gs:12"); // &kmems[cpunum()]
extern struct runq *runq asm("%gs:16"); // &runqs[cpunum()]
extern struct cpu *cpu __asm("%gs:0"); // &cpus[cpunum()]
extern struct proc *proc __asm("%gs:4"); // cpus[cpunum()].proc
extern struct ptable *ptable __asm("%gs:8"); // &ptables[cpunum()]
extern struct kmem *kmem __asm("%gs:12"); // &kmems[cpunum()]
extern struct runq *runq __asm("%gs:16"); // &runqs[cpunum()]
#pragma once
// Mutual exclusion lock.
struct spinlock {
uint locked; // Is the lock held?
......
......@@ -19,8 +19,10 @@
// Fetch the int at user address addr in process p into *ip.
// Returns 0 on success.
// NOTE(review): the bounds check against p->sz is deliberately disabled
// (#if 0, "XXX use pagefault()") during the vmap transition — a bad addr
// currently faults in the kernel instead of returning -1. Confirm this is
// resolved once pagefault()-based checking lands.
int
fetchint(struct proc *p, uint addr, int *ip)
{
#if 0 /* XXX use pagefault() */
if(addr >= p->sz || addr+4 > p->sz)
return -1;
#endif
// Direct dereference: user pages are mapped in the kernel's view.
*ip = *(int*)(addr);
return 0;
}
......@@ -33,10 +35,16 @@ fetchstr(struct proc *p, uint addr, char **pp)
{
char *s, *ep;
#if 0 /* XXX use pagefault() */
if(addr >= p->sz)
return -1;
#endif
*pp = (char*)addr;
#if 0 /* XXX use pagefault() */
ep = (char*)p->sz;
#else
ep = (char *) 0xffffffff;
#endif
for(s = *pp; s < ep; s++)
if(*s == 0)
return s - *pp;
......@@ -60,8 +68,10 @@ argptr(int n, char **pp, int size)
if(argint(n, &i) < 0)
return -1;
#if 0 /* XXX use pagefault() */
if((uint)i >= proc->sz || (uint)i+size > proc->sz)
return -1;
#endif
*pp = (char*)i;
return 0;
}
......
......@@ -20,3 +20,5 @@
#define SYS_sbrk 19
#define SYS_sleep 20
#define SYS_uptime 21
#define SYS_map 22
#define SYS_unmap 23
......@@ -51,7 +51,7 @@ sys_sbrk(void)
if(argint(0, &n) < 0)
return -1;
addr = proc->sz;
addr = proc->brk;
if(growproc(n) < 0)
return -1;
return addr;
......
......@@ -47,6 +47,13 @@ trap(struct trapframe *tf)
return;
}
if(tf->trapno == T_PGFLT){
if(pagefault(proc->pgdir, proc->vmap, rcr2()) >= 0){
switchuvm(proc);
return;
}
}
switch(tf->trapno){
case T_IRQ0 + IRQ_TIMER:
if(cpu->id == 0){
......
......@@ -1379,13 +1379,13 @@ void
validateint(int *p)
{
int res;
asm("mov %%esp, %%ebx\n\t"
"mov %3, %%esp\n\t"
"int %2\n\t"
"mov %%ebx, %%esp" :
"=a" (res) :
"a" (SYS_sleep), "n" (T_SYSCALL), "c" (p) :
"ebx");
__asm("mov %%esp, %%ebx\n\t"
"mov %3, %%esp\n\t"
"int %2\n\t"
"mov %%ebx, %%esp" :
"=a" (res) :
"a" (SYS_sleep), "n" (T_SYSCALL), "c" (p) :
"ebx");
}
void
......
......@@ -191,97 +191,182 @@ switchuvm(struct proc *p)
popcli();
}
// Load the initcode into address 0 of pgdir.
// sz must be less than a page.
// Static backing stores for vmnodes and vmaps. Slots are claimed
// lock-free via each entry's 'alloc' flag using compare-and-swap
// (see vmn_alloc / vmap_alloc) and released by resetting alloc to 0.
struct {
struct vmnode n[1024];
} vmnodes;
struct {
struct vmap m[128];
} vmaps;
// Claim a free vmnode slot from the static table.
// A slot is free when alloc == 0; the CAS makes the claim atomic, so no
// lock is needed. Panics if the table is exhausted (never returns 0).
struct vmnode *
vmn_alloc(void)
{
  uint nelem = sizeof(vmnodes.n) / sizeof(vmnodes.n[0]);
  for(uint idx = 0; idx < nelem; idx++) {
    struct vmnode *node = &vmnodes.n[idx];
    if(node->alloc != 0)
      continue;
    if(!__sync_bool_compare_and_swap(&node->alloc, 0, 1))
      continue;
    node->npages = 0;
    node->ref = 0;
    return node;
  }
  panic("out of vmnodes");
}
// Allocate a vmnode backed by npg freshly zeroed physical pages.
// Returns 0 on failure (npg exceeds the per-vmnode page limit, or the
// system is out of physical pages).
struct vmnode *
vmn_allocpg(uint npg)
{
  struct vmnode *n = vmn_alloc();
  if(npg > sizeof(n->page) / sizeof(n->page[0])) {
    cprintf("vmnode too big: %d\n", npg);
    // BUG FIX: release the just-claimed slot; returning without this
    // leaked the vmnode (alloc stayed 1 forever).
    vmn_free(n);
    return 0;
  }
  for(uint i = 0; i < npg; i++) {
    if((n->page[i] = kalloc()) == 0) {
      vmn_free(n);  // frees the pages allocated so far and the slot
      return 0;
    }
    memset((char *) n->page[i], 0, PGSIZE);
    n->npages++;
  }
  return n;
}
void
inituvm(pde_t *pgdir, char *init, uint sz)
vmn_free(struct vmnode *n)
{
char *mem;
if(sz >= PGSIZE)
panic("inituvm: more than a page");
mem = kalloc();
memset(mem, 0, PGSIZE);
mappages(pgdir, 0, PGSIZE, PADDR(mem), PTE_W|PTE_U);
memmove(mem, init, sz);
for(uint i = 0; i < n->npages; i++) {
kfree((char *) n->page[i]);
n->page[i] = 0;
}
n->alloc = 0;
}
// Load a program segment into pgdir. addr must be page-aligned
// and the pages from addr to addr+sz must already be mapped.
int
loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
void
vmn_decref(struct vmnode *n)
{
uint i, pa, n;
pte_t *pte;
if(__sync_sub_and_fetch(&n->ref, 1) == 0)
vmn_free(n);
}
if((uint)addr % PGSIZE != 0)
panic("loaduvm: addr must be page aligned");
for(i = 0; i < sz; i += PGSIZE){
if((pte = walkpgdir(pgdir, addr+i, 0)) == 0)
panic("loaduvm: address should exist");
pa = PTE_ADDR(*pte);
if(sz - i < PGSIZE)
n = sz - i;
else
n = PGSIZE;
if(readi(ip, (char*)pa, offset+i, n) != n)
return -1;
// Make a deep copy of vmnode n: same page count, page contents duplicated.
// Returns 0 if a fresh vmnode (or its pages) cannot be allocated.
struct vmnode *
vmn_copy(struct vmnode *n)
{
  struct vmnode *dup = vmn_allocpg(n->npages);
  if(dup == 0)
    return 0;
  for(uint pg = 0; pg < n->npages; pg++)
    memmove(dup->page[pg], n->page[pg], PGSIZE);
  return dup;
}
struct vmap *
vmap_alloc(void)
{
for(uint i = 0; i < sizeof(vmaps.m) / sizeof(vmaps.m[0]); i++) {
struct vmap *m = &vmaps.m[i];
if(m->alloc == 0 && __sync_bool_compare_and_swap(&m->alloc, 0, 1)) {
for(uint j = 0; j < sizeof(m->e) / sizeof(m->e[0]); j++)
m->e[j].n = 0;
return m;
}
}
return 0;
panic("out of vmaps");
}
// Drop every vmnode reference held by map m, then return the vmap slot
// to the static table. Caller must guarantee no concurrent users of m.
void
vmap_free(struct vmap *m)
{
  uint nslots = sizeof(m->e) / sizeof(m->e[0]);
  for(uint slot = 0; slot < nslots; slot++) {
    if(m->e[slot].n != 0)
      vmn_decref(m->e[slot].n);
  }
  m->alloc = 0;
}
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
int
allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
vmap_insert(struct vmap *m, struct vmnode *n, uint va_start)
{
char *mem;
uint a;
acquire(&m->lock);
uint va_end = va_start + n->npages * PGSIZE;
for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
if(m->e[i].n && (m->e[i].va_start < va_end && m->e[i].va_end > va_start)) {
release(&m->lock);
cprintf("vmap_insert: overlap\n");
return -1;
}
}
if(newsz > USERTOP)
for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
if(m->e[i].n)
continue;
__sync_fetch_and_add(&n->ref, 1);
m->e[i].va_start = va_start;
m->e[i].va_end = va_end;
m->e[i].n = n;
release(&m->lock);
return 0;
if(newsz < oldsz)
return oldsz;
a = PGROUNDUP(oldsz);
for(; a < newsz; a += PGSIZE){
mem = kalloc();
if(mem == 0){
cprintf("allocuvm out of memory\n");
deallocuvm(pgdir, newsz, oldsz);
}
release(&m->lock);
cprintf("vmap_insert: out of vma slots\n");
return -1;
}
// Find the vma in m that contains virtual address va.
// On success returns the vma with its lock HELD — the caller must
// release(&vma->lock). Returns 0 if va falls in no mapping.
// Lock order: m->lock, then e->lock, then m->lock dropped.
struct vma *
vmap_lookup(struct vmap *m, uint va)
{
  struct vma *found = 0;
  acquire(&m->lock);
  for(uint slot = 0; slot < sizeof(m->e) / sizeof(m->e[0]); slot++) {
    struct vma *e = &m->e[slot];
    if(va < e->va_start || va >= e->va_end)
      continue;
    acquire(&e->lock);
    found = e;
    break;
  }
  release(&m->lock);
  return found;
}
struct vmap *
vmap_copy(struct vmap *m)
{
struct vmap *c = vmap_alloc();
if(c == 0)
return 0;
acquire(&m->lock);
for(uint i = 0; i < sizeof(m->e) / sizeof(m->e[0]); i++) {
if(m->e[i].n == 0)
continue;
c->e[i].va_start = m->e[i].va_start;
c->e[i].va_end = m->e[i].va_end;
c->e[i].n = vmn_copy(m->e[i].n);
if(c->e[i].n == 0) {
release(&m->lock);
vmap_free(c);
return 0;
}
memset(mem, 0, PGSIZE);
mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);
__sync_fetch_and_add(&c->e[i].n->ref, 1);
}
return newsz;
release(&m->lock);
return c;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
// Load a program segment into a vmnode.
int
deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
vmn_load(struct vmnode *vmn, struct inode *ip, uint offset, uint sz)
{
pte_t *pte;
uint a, pa;
if(newsz >= oldsz)
return oldsz;
a = PGROUNDUP(newsz);
for(; a < oldsz; a += PGSIZE){
pte = walkpgdir(pgdir, (char*)a, 0);
if(pte && (*pte & PTE_P) != 0){
pa = PTE_ADDR(*pte);
if(pa == 0)
panic("kfree");
kfree((char*)pa);
*pte = 0;
}
for(uint i = 0; i < sz; i += PGSIZE){
uint n;
char *p = vmn->page[i / PGSIZE];
if(sz - i < PGSIZE)
n = sz - i;
else
n = PGSIZE;
if(readi(ip, p, offset+i, n) != n)
return -1;
}
return newsz;
return 0;
}
// Free a page table and all the physical memory pages
......@@ -293,7 +378,6 @@ freevm(pde_t *pgdir)
if(pgdir == 0)
panic("freevm: no pgdir");
deallocuvm(pgdir, USERTOP, 0);
for(i = 0; i < NPDENTRIES; i++){
if(pgdir[i] & PTE_P)
kfree((char*)PTE_ADDR(pgdir[i]));
......@@ -301,37 +385,6 @@ freevm(pde_t *pgdir)
kfree((char*)pgdir);
}
// Given a parent process's page table, create a copy
// of it for a child.
// Given a parent process's page table, create a copy of it for a child:
// duplicates the first sz bytes of the user address space into a fresh
// page directory. Returns the new pgdir, or 0 on allocation failure.
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
  pde_t *d;
  pte_t *pte;
  uint pa, i;
  char *mem;

  if((d = setupkvm()) == 0)
    return 0;
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");
    pa = PTE_ADDR(*pte);
    if((mem = kalloc()) == 0)
      goto bad;
    memmove(mem, (char*)pa, PGSIZE);
    if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0){
      // BUG FIX: mem is not yet mapped into d, so freevm(d) below
      // cannot reclaim it — free it here to avoid leaking the page.
      kfree(mem);
      goto bad;
    }
  }
  return d;

bad:
  freevm(d);
  return 0;
}
//PAGEBREAK!
// Map user virtual address to kernel physical address.
char*
......@@ -347,28 +400,73 @@ uva2ka(pde_t *pgdir, char *uva)
return (char*)PTE_ADDR(*pte);
}
// Copy len bytes from p to user address va in page table pgdir.
// Most useful when pgdir is not the current page table.
// uva2ka ensures this only works for PTE_U pages.
// Copy len bytes from p to user address va in vmap.
// Most useful when vmap is not the current page table.
int
copyout(pde_t *pgdir, uint va, void *p, uint len)
copyout(struct vmap *vmap, uint va, void *p, uint len)
{
char *buf, *pa0;
uint n, va0;
buf = (char*)p;
char *buf = (char*)p;
while(len > 0){
va0 = (uint)PGROUNDDOWN(va);
pa0 = uva2ka(pgdir, (char*)va0);
if(pa0 == 0)
uint va0 = (uint)PGROUNDDOWN(va);
struct vma *vma = vmap_lookup(vmap, va);
if(vma == 0)
return -1;
n = PGSIZE - (va - va0);
uint pn = (va0 - vma->va_start) / PGSIZE;
char *p0 = vma->n->page[pn];
if(p0 == 0)
panic("copyout: missing page");
uint n = PGSIZE - (va - va0);
if(n > len)
n = len;
memmove(pa0 + (va - va0), buf, n);
memmove(p0 + (va - va0), buf, n);
len -= n;
buf += n;
va = va0 + PGSIZE;
release(&vma->lock);
}
return 0;
}
// Copy len bytes from user address va in vmap into kernel buffer p.
// Works page by page; each page's vma is looked up (returned locked by
// vmap_lookup) and released after the copy. Returns 0 on success, -1 if
// any part of [va, va+len) is unmapped.
int
copyin(struct vmap *vmap, uint va, void *p, uint len)
{
  char *buf = (char*)p;
  while(len > 0){
    uint va0 = (uint)PGROUNDDOWN(va);
    struct vma *vma = vmap_lookup(vmap, va);
    if(vma == 0)
      return -1;
    uint pn = (va0 - vma->va_start) / PGSIZE;
    char *p0 = vma->n->page[pn];
    if(p0 == 0)
      panic("copyin: missing page");  // BUG FIX: message said "copyout"
    uint n = PGSIZE - (va - va0);
    if(n > len)
      n = len;
    memmove(buf, p0 + (va - va0), n);
    len -= n;
    buf += n;
    va = va0 + PGSIZE;
    release(&vma->lock);
  }
  return 0;
}
// Handle a page fault at va: if va lies inside some vma of vmap, install
// a present/user/writable PTE for the backing page.
// Returns 1 if a mapping was installed, 0 if va was already fully mapped,
// -1 on failure (no page-table page, or va not covered by any vma).
int
pagefault(pde_t *pgdir, struct vmap *vmap, uint va)
{
  pte_t *pte = walkpgdir(pgdir, (const void *)va, 1);
  // BUG FIX: walkpgdir with alloc=1 returns 0 when it cannot allocate a
  // page-table page; the old code dereferenced that NULL.
  if(pte == 0)
    return -1;
  if((*pte & (PTE_P|PTE_U|PTE_W)) == (PTE_P|PTE_U|PTE_W))
    return 0;

  // vmap_lookup returns the vma with its lock held.
  struct vma *m = vmap_lookup(vmap, va);
  if(m == 0)
    return -1;

  uint npg = (PGROUNDDOWN(va) - m->va_start) / PGSIZE;
  *pte = PADDR(m->n->page[npg]) | PTE_P | PTE_U | PTE_W;
  release(&m->lock);
  return 1;
}
......@@ -5,47 +5,47 @@ inb(ushort port)
{
uchar data;
asm volatile("in %1,%0" : "=a" (data) : "d" (port));
__asm volatile("in %1,%0" : "=a" (data) : "d" (port));
return data;
}
static inline void
insl(int port, void *addr, int cnt)
{
asm volatile("cld; rep insl" :
"=D" (addr), "=c" (cnt) :
"d" (port), "0" (addr), "1" (cnt) :
"memory", "cc");
__asm volatile("cld; rep insl" :
"=D" (addr), "=c" (cnt) :
"d" (port), "0" (addr), "1" (cnt) :
"memory", "cc");
}
static inline void
outb(ushort port, uchar data)
{
asm volatile("out %0,%1" : : "a" (data), "d" (port));
__asm volatile("out %0,%1" : : "a" (data), "d" (port));
}
static inline void
outw(ushort port, ushort data)
{
asm volatile("out %0,%1" : : "a" (data), "d" (port));
__asm volatile("out %0,%1" : : "a" (data), "d" (port));
}
static inline void
outsl(int port, const void *addr, int cnt)
{
asm volatile("cld; rep outsl" :
"=S" (addr), "=c" (cnt) :
"d" (port), "0" (addr), "1" (cnt) :
"cc");
__asm volatile("cld; rep outsl" :
"=S" (addr), "=c" (cnt) :
"d" (port), "0" (addr), "1" (cnt) :
"cc");
}
static inline void
stosb(void *addr, int data, int cnt)
{
asm volatile("cld; rep stosb" :
"=D" (addr), "=c" (cnt) :
"0" (addr), "1" (cnt), "a" (data) :
"memory", "cc");
__asm volatile("cld; rep stosb" :
"=D" (addr), "=c" (cnt) :
"0" (addr), "1" (cnt), "a" (data) :
"memory", "cc");
}
struct segdesc;
......@@ -59,7 +59,7 @@ lgdt(struct segdesc *p, int size)
pd[1] = (uint)p;
pd[2] = (uint)p >> 16;
asm volatile("lgdt (%0)" : : "r" (pd));
__asm volatile("lgdt (%0)" : : "r" (pd));
}
struct gatedesc;
......@@ -73,34 +73,34 @@ lidt(struct gatedesc *p, int size)
pd[1] = (uint)p;
pd[2] = (uint)p >> 16;
asm volatile("lidt (%0)" : : "r" (pd));
__asm volatile("lidt (%0)" : : "r" (pd));
}
static inline void
ltr(ushort sel)
{
asm volatile("ltr %0" : : "r" (sel));
__asm volatile("ltr %0" : : "r" (sel));
}
static inline uint
readeflags(void)
{
uint eflags;
asm volatile("pushfl; popl %0" : "=r" (eflags));
__asm volatile("pushfl; popl %0" : "=r" (eflags));
return eflags;
}
static inline void
loadgs(ushort v)
{
asm volatile("movw %0, %%gs" : : "r" (v));
__asm volatile("movw %0, %%gs" : : "r" (v));
}
static inline uint
rebp(void)
{
uint val;
asm volatile("movl %%ebp,%0" : "=r" (val));
__asm volatile("movl %%ebp,%0" : "=r" (val));
return val;
}
......@@ -108,20 +108,20 @@ static inline uint
resp(void)
{
uint val;
asm volatile("movl %%esp,%0" : "=r" (val));
__asm volatile("movl %%esp,%0" : "=r" (val));
return val;
}
static inline void
cli(void)
{
asm volatile("cli");
__asm volatile("cli");
}
static inline void
sti(void)
{
asm volatile("sti");
__asm volatile("sti");
}
static inline uint
......@@ -130,10 +130,10 @@ xchg(volatile uint *addr, uint newval)
uint result;
// The + in "+m" denotes a read-modify-write operand.
asm volatile("lock; xchgl %0, %1" :
"+m" (*addr), "=a" (result) :
"1" (newval) :
"cc");
__asm volatile("lock; xchgl %0, %1" :
"+m" (*addr), "=a" (result) :
"1" (newval) :
"cc");
return result;
}
......@@ -141,14 +141,14 @@ xchg(volatile uint *addr, uint newval)
static inline void
lcr0(uint val)
{
asm volatile("movl %0,%%cr0" : : "r" (val));
__asm volatile("movl %0,%%cr0" : : "r" (val));
}
static inline uint
rcr0(void)
{
uint val;
asm volatile("movl %%cr0,%0" : "=r" (val));
__asm volatile("movl %%cr0,%0" : "=r" (val));
return val;
}
......@@ -156,21 +156,21 @@ static inline uint
rcr2(void)
{
uint val;
asm volatile("movl %%cr2,%0" : "=r" (val));
__asm volatile("movl %%cr2,%0" : "=r" (val));
return val;
}
static inline void
lcr3(uint val)
{
asm volatile("movl %0,%%cr3" : : "r" (val));
__asm volatile("movl %0,%%cr3" : : "r" (val));
}
static inline uint
rcr3(void)
{
uint val;
asm volatile("movl %%cr3,%0" : "=r" (val));
__asm volatile("movl %%cr3,%0" : "=r" (val));
return val;
}
......
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论