Commit aa5825fc authored by Silas Boyd-Wickizer

Drop in all the system calls.

Parent eed1de86
@@ -96,6 +96,7 @@ void kmfree(void*);
 int cpunum(void);
 void lapicstartap(u8, u32 addr);
 void lapiceoi(void);
+void lapic_tlbflush(u32);

 // mp.c
 extern int ncpu;
@@ -212,6 +213,9 @@ void swtch(struct context**, struct context*);
 // trap.c
 extern struct segdesc bootgdt[NSEGS];
 extern u64 ticks;
+extern struct spinlock tickslock;
+extern struct condvar cv_ticks;

 // uart.c
 void uartputc(char c);
@@ -229,3 +233,6 @@ void switchkvm(void);
 int pagefault(struct vmap *, uptr, u32);
 void vmap_decref(struct vmap *);
 int vmn_load(struct vmnode *, struct inode*, u64, u64);
+int vmap_remove(struct vmap *, uptr, u64);
+void updatepages(pml4e_t*, void*, void*, int);
+struct vmap * vmap_copy(struct vmap *, int);
@@ -458,6 +458,228 @@ scheduler(void)
   }
 }
// Grow/shrink current process's memory by n bytes.
// Growing may allocate vmas and physical memory,
// but avoids interfering with any existing vma.
// Assumes vmas around proc->brk are part of the growable heap.
// Shrinking just decreases proc->brk; doesn't deallocate.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
struct vmap *m = myproc()->vmap;
if(n < 0 && 0 - n <= myproc()->brk){
myproc()->brk += n;
return 0;
}
if(n < 0 || n > USERTOP || myproc()->brk + n > USERTOP)
return -1;
acquire(&m->lock);
// find first unallocated address in brk..brk+n
uptr newstart = myproc()->brk;
u64 newn = n;
rcu_begin_read();
while(newn > 0){
struct vma *e = vmap_lookup(m, newstart, 1);
if(e == 0)
break;
if(e->va_end >= newstart + newn){
newstart += newn;
newn = 0;
break;
}
newn -= e->va_end - newstart;
newstart = e->va_end;
}
rcu_end_read();
if(newn <= 0){
// no need to allocate
myproc()->brk += n;
release(&m->lock);
switchuvm(myproc());
return 0;
}
// is there space for newstart..newstart+newn?
if(vmap_lookup(m, newstart, newn) != 0){
cprintf("growproc: not enough room in address space; brk %d n %d\n",
myproc()->brk, n);
return -1;
}
// would the newly allocated region abut the next-higher
// vma? we can't allow that, since then a future sbrk()
// would start to use the next region (e.g. the stack).
if(vmap_lookup(m, PGROUNDUP(newstart+newn), 1) != 0){
cprintf("growproc: would abut next vma; brk %d n %d\n",
myproc()->brk, n);
return -1;
}
struct vmnode *vmn = vmn_allocpg(PGROUNDUP(newn) / PGSIZE);
if(vmn == 0){
release(&m->lock);
cprintf("growproc: vmn_allocpg failed\n");
return -1;
}
release(&m->lock); // XXX
if(vmap_insert(m, vmn, newstart) < 0){
vmn_free(vmn);
cprintf("growproc: vmap_insert failed\n");
return -1;
}
myproc()->brk += n;
switchuvm(myproc());
return 0;
}
// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
struct proc *p;
p = (struct proc *) ns_lookup(nspid, KI(pid));
if (p == 0) {
panic("kill");
return -1;
}
acquire(&p->lock);
p->killed = 1;
if(p->state == SLEEPING){
// XXX
// we need to wake p up if it is cv_sleep()ing.
// can't change p from SLEEPING to RUNNABLE since that
// would make some condvar->waiters a dangling reference,
// and the non-zero p->cv_next will cause a future panic.
// can't call cv_wakeup(p->oncv) since that results in
// deadlock (addrun() acquires p->lock).
// can't release p->lock then call cv_wakeup() since the
// cv might be deallocated while we're using it
// (pipes dynamically allocate condvars).
}
release(&p->lock);
return 0;
}
// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(int flags)
{
int i, pid;
struct proc *np;
int cow = 1;
// cprintf("%d: fork\n", proc->pid);
// Allocate process.
if((np = allocproc()) == 0)
return -1;
if(flags == 0) {
// Copy process state from p.
if((np->vmap = vmap_copy(myproc()->vmap, cow)) == 0){
kfree(np->kstack);
np->kstack = 0;
np->state = UNUSED;
if (ns_remove(nspid, KI(np->pid), np) == 0)
panic("fork: ns_remove");
rcu_delayed(np, kmfree);
return -1;
}
} else {
np->vmap = myproc()->vmap;
__sync_fetch_and_add(&np->vmap->ref, 1);
}
np->brk = myproc()->brk;
np->parent = myproc();
*np->tf = *myproc()->tf;
// Clear %eax so that fork returns 0 in the child.
np->tf->rax = 0;
for(i = 0; i < NOFILE; i++)
if(myproc()->ofile[i])
np->ofile[i] = filedup(myproc()->ofile[i]);
np->cwd = idup(myproc()->cwd);
pid = np->pid;
safestrcpy(np->name, myproc()->name, sizeof(myproc()->name));
acquire(&myproc()->lock);
SLIST_INSERT_HEAD(&myproc()->childq, np, child_next);
release(&myproc()->lock);
acquire(&np->lock);
addrun(np);
np->state = RUNNABLE;
release(&np->lock);
migrate(np);
// cprintf("%d: fork done (pid %d)\n", myproc()->pid, pid);
return pid;
}
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
struct proc *p, *np;
int havekids, pid;
for(;;){
// Scan children for ZOMBIEs
havekids = 0;
acquire(&myproc()->lock);
SLIST_FOREACH_SAFE(p, &myproc()->childq, child_next, np) {
havekids = 1;
acquire(&p->lock);
if(p->state == ZOMBIE){
release(&p->lock); // noone else better be trying to lock p
pid = p->pid;
SLIST_REMOVE(&myproc()->childq, p, proc, child_next);
release(&myproc()->lock);
kfree(p->kstack);
p->kstack = 0;
vmap_decref(p->vmap);
p->state = UNUSED;
if (ns_remove(nspid, KI(p->pid), p) == 0)
panic("wait: ns_remove");
p->pid = 0;
p->parent = 0;
p->name[0] = 0;
p->killed = 0;
rcu_delayed(p, kmfree);
return pid;
}
release(&p->lock);
}
// No point waiting if we don't have any children.
if(!havekids || myproc()->killed){
release(&myproc()->lock);
return -1;
}
// Wait for children to exit. (See wakeup1 call in proc_exit.)
cv_sleep(&myproc()->cv, &myproc()->lock);
release(&myproc()->lock);
}
}
......
@@ -29,8 +29,10 @@ acquire(struct spinlock *lk)
   pushcli(); // disable interrupts to avoid deadlock.
 #if SPINLOCK_DEBUG
-  if(holding(lk))
+  if(holding(lk)) {
+    cprintf("%lx\n", __builtin_return_address(0));
     panic("acquire");
+  }
 #endif
   mtrace_lock_register(RET_EIP(),
......
@@ -125,34 +125,30 @@ extern int sys_unmap(void);
 extern int sys_halt(void);

 static int (*syscalls[])(void) = {
-#if 0
   [SYS_chdir] = sys_chdir,
   [SYS_close] = sys_close,
   [SYS_dup] = sys_dup,
-#endif
-  [SYS_exec] = sys_exec,
-#if 0
-  [SYS_exit] sys_exit,
-  [SYS_fork] sys_fork,
-  [SYS_fstat] sys_fstat,
-  [SYS_getpid] sys_getpid,
-  [SYS_kill] sys_kill,
-  [SYS_link] sys_link,
-  [SYS_mkdir] sys_mkdir,
-  [SYS_mknod] sys_mknod,
-  [SYS_open] sys_open,
-  [SYS_pipe] sys_pipe,
-  [SYS_read] sys_read,
-  [SYS_sbrk] sys_sbrk,
-  [SYS_sleep] sys_sleep,
-  [SYS_unlink] sys_unlink,
-  [SYS_wait] sys_wait,
-  [SYS_write] sys_write,
-  [SYS_uptime] sys_uptime,
-  [SYS_map] sys_map,
-  [SYS_unmap] sys_unmap,
-  [SYS_halt] sys_halt,
-#endif
+  [SYS_exec] = sys_exec,
+  [SYS_exit] = sys_exit,
+  [SYS_fork] = sys_fork,
+  [SYS_fstat] = sys_fstat,
+  [SYS_getpid] = sys_getpid,
+  [SYS_kill] = sys_kill,
+  [SYS_link] = sys_link,
+  [SYS_mkdir] = sys_mkdir,
+  [SYS_mknod] = sys_mknod,
+  [SYS_open] = sys_open,
+  [SYS_pipe] = sys_pipe,
+  [SYS_read] = sys_read,
+  [SYS_sbrk] = sys_sbrk,
+  [SYS_sleep] = sys_sleep,
+  [SYS_unlink] = sys_unlink,
+  [SYS_wait] = sys_wait,
+  [SYS_write] = sys_write,
+  [SYS_uptime] = sys_uptime,
+  [SYS_map] = sys_map,
+  [SYS_unmap] = sys_unmap,
+  [SYS_halt] = sys_halt,
 };

 void
......
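The table above is indexed by syscall number with C99 designated initializers, so any slot that is never assigned stays NULL. The dispatcher that consumes it is not part of this diff; the sketch below shows how such a table is typically used in xv6-derived kernels, assuming the syscall number and return value travel in the trapframe's rax field (as fork() above suggests) — the exact error handling is an assumption, not taken from this commit.

// Hedged sketch (not from this commit): dispatching through syscalls[].
// Assumes NELEM() and a trapframe rax field as used elsewhere in the tree.
void
syscall(void)
{
  int num = myproc()->tf->rax;            // syscall number chosen by user space
  if(num >= 0 && num < NELEM(syscalls) && syscalls[num])
    myproc()->tf->rax = syscalls[num]();  // return value goes back in rax
  else {
    cprintf("unknown sys call %d\n", num);
    myproc()->tf->rax = -1;
  }
}

With designated initializers, adding a syscall is a two-line change: declare sys_foo() and add [SYS_foo] = sys_foo to the table; missing entries fail safely through the NULL check.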
@@ -8,14 +8,14 @@
 #include "condvar.h"
 #include "queue.h"
 #include "proc.h"
+#include "cpu.h"

-#if 0
 int
 sys_fork(void)
 {
   int flags;

-  if(argint(0, &flags) < 0)
+  if(argint32(0, &flags) < 0)
     return -1;
   return fork(flags);
 }
@@ -38,7 +38,7 @@ sys_kill(void)
 {
   int pid;

-  if(argint(0, &pid) < 0)
+  if(argint32(0, &pid) < 0)
     return -1;
   return kill(pid);
 }
@@ -46,18 +46,18 @@ sys_kill(void)
 int
 sys_getpid(void)
 {
-  return proc->pid;
+  return myproc()->pid;
 }

 int
 sys_sbrk(void)
 {
-  int addr;
+  uptr addr;
   int n;

-  if(argint(0, &n) < 0)
+  if(argint32(0, &n) < 0)
     return -1;
-  addr = proc->brk;
+  addr = myproc()->brk;
   if(growproc(n) < 0)
     return -1;
   return addr;
@@ -67,14 +67,14 @@ int
 sys_sleep(void)
 {
   int n;
-  uint ticks0;
+  u32 ticks0;

-  if(argint(0, &n) < 0)
+  if(argint32(0, &n) < 0)
     return -1;
   acquire(&tickslock);
   ticks0 = ticks;
   while(ticks - ticks0 < n){
-    if(proc->killed){
+    if(myproc()->killed){
       release(&tickslock);
       return -1;
     }
@@ -89,7 +89,7 @@ sys_sleep(void)
 int
 sys_uptime(void)
 {
-  uint xticks;
+  u64 xticks;

   acquire(&tickslock);
   xticks = ticks;
@@ -100,19 +100,19 @@ sys_uptime(void)
 int
 sys_map(void)
 {
-  uint addr;
-  uint len;
+  uptr addr;
+  u64 len;

-  if (argint(0, (int*) &addr) < 0)
+  if (argint64(0, &addr) < 0)
     return -1;
-  if (argint(1, (int*) &len) < 0)
+  if (argint64(1, &len) < 0)
     return -1;

   struct vmnode *vmn = vmn_allocpg(PGROUNDUP(len) / PGSIZE);
   if (vmn == 0)
     return -1;
-  if (vmap_insert(proc->vmap, vmn, PGROUNDDOWN(addr)) < 0) {
+  if (vmap_insert(myproc()->vmap, vmn, PGROUNDDOWN(addr)) < 0) {
     vmn_free(vmn);
     return -1;
   }
@@ -123,28 +123,27 @@ sys_map(void)
 int
 sys_unmap(void)
 {
-  uint addr;
-  uint len;
+  uptr addr;
+  u64 len;

-  if (argint(0, (int*) &addr) < 0)
+  if (argint64(0, &addr) < 0)
     return -1;
-  if (argint(1, (int*) &len) < 0)
+  if (argint64(1, &len) < 0)
     return -1;

-  if (vmap_remove(proc->vmap, PGROUNDDOWN(addr), PGROUNDUP(len)) < 0)
+  if (vmap_remove(myproc()->vmap, PGROUNDDOWN(addr), PGROUNDUP(len)) < 0)
     return -1;

-  updatepages(proc->vmap->pgdir,
+  updatepages(myproc()->vmap->pml4,
               (void*) (PGROUNDDOWN(addr)),
               (void*) (PGROUNDDOWN(addr)+PGROUNDUP(len)), 0);
   cli();
-  lcr3(v2p(proc->vmap->pgdir));
-  for (uint i = 0; i < ncpu; i++)
-    if (i != cpu->id)
+  lcr3(v2p(myproc()->vmap->pml4));
+  for (int i = 0; i < ncpu; i++)
+    if (i != mycpu()->id)
       lapic_tlbflush(i);
   sti();
   return 0;
 }
-#endif

 int
 sys_halt(void)
......
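The hunks above replace the 32-bit argint() calls with width-specific argint32()/argint64() fetchers and route all per-process state through myproc(). A new syscall that takes one pointer-sized argument would follow the same pattern; the function below is a hypothetical example only, not part of this commit (argint64 is assumed to take a 64-bit out-parameter, matching its use in sys_map, and vmap_lookup/rcu_begin_read are used the same way growproc uses them).

// Hypothetical example only: a syscall that checks whether one user
// address is mapped, written in the style the hunks above converge on.
int
sys_probe(void)
{
  uptr addr;
  struct vma *e;

  if (argint64(0, &addr) < 0)       // fetch 64-bit argument 0
    return -1;
  rcu_begin_read();
  e = vmap_lookup(myproc()->vmap, addr, 1);
  rcu_end_read();
  return e ? 0 : -1;                // 0 if something is mapped at addr
}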
@@ -14,8 +14,8 @@
 u64 ticks __mpalign__;
-static struct spinlock tickslock __mpalign__;
-static struct condvar cv_ticks __mpalign__;
+struct spinlock tickslock __mpalign__;
+struct condvar cv_ticks __mpalign__;

 struct segdesc __attribute__((aligned(16))) bootgdt[NSEGS] = {
   // null
......
@@ -505,6 +505,66 @@ vmn_doload(struct vmnode *vmn, struct inode *ip, u64 offset, u64 sz)
   return 0;
 }
struct vmap *
vmap_copy(struct vmap *m, int share)
{
struct vmap *c = vmap_alloc();
if(c == 0)
return 0;
acquire(&m->lock);
#ifdef TREE
struct state *st = kmalloc(sizeof(struct state));
st->share = share;
st->pgdir = m->pgdir;
st->root = c->root;
if (!tree_foreach(m->root, vmap_copy_vma, st)) {
vmap_free(c);
release(&m->lock);
kmfree(st);
return 0;
}
c->root = st->root;
kmfree(st);
#else
for(int i = 0; i < NELEM(m->e); i++) {
if(m->e[i] == 0)
continue;
c->e[i] = vma_alloc();
if (c->e[i] == 0) {
release(&m->lock);
vmap_free(c);
return 0;
}
c->e[i]->va_start = m->e[i]->va_start;
c->e[i]->va_end = m->e[i]->va_end;
if (share) {
c->e[i]->n = m->e[i]->n;
c->e[i]->va_type = COW;
acquire(&m->e[i]->lock);
m->e[i]->va_type = COW;
updatepages(m->pml4, (void *) (m->e[i]->va_start), (void *) (m->e[i]->va_end), PTE_COW);
release(&m->e[i]->lock);
} else {
c->e[i]->n = vmn_copy(m->e[i]->n);
c->e[i]->va_type = m->e[i]->va_type;
}
if(c->e[i]->n == 0) {
release(&m->lock);
vmap_free(c);
return 0;
}
__sync_fetch_and_add(&c->e[i]->n->ref, 1);
}
#endif
if (share)
lcr3(v2p(m->pml4)); // Reload hardware page table
release(&m->lock);
return c;
}
 static struct vma *
 pagefault_ondemand(struct vmap *vmap, uptr va, u32 err, struct vma *m)
 {
@@ -604,9 +664,39 @@ vmn_load(struct vmnode *vmn, struct inode *ip, u64 offset, u64 sz)
   }
 }
int
vmap_remove(struct vmap *m, uptr va_start, u64 len)
{
acquire(&m->lock);
uptr va_end = va_start + len;
#ifdef TREE
struct kv *kv = tree_find_gt(m->root, va_start);
if (kv == 0)
panic("no vma?");
struct vma *e = (struct vma *) kv->val;
if(e->va_start != va_start || e->va_end != va_end) {
cprintf("vmap_remove: partial unmap unsupported\n");
release(&m->lock);
return -1;
}
m->root = tree_remove(m->root, va_start+len);
rcu_delayed(e, vma_free);
#else
for(int i = 0; i < NELEM(m->e); i++) {
if(m->e[i] && (m->e[i]->va_start < va_end && m->e[i]->va_end > va_start)) {
if(m->e[i]->va_start != va_start || m->e[i]->va_end != va_end) {
release(&m->lock);
cprintf("vmap_remove: partial unmap unsupported\n");
return -1;
}
rcu_delayed(m->e[i], vma_free);
m->e[i] = 0;
}
}
#endif
release(&m->lock);
return 0;
}
......