Commit eed1de86 authored by Silas Boyd-Wickizer

Enough gunk for a sys_exec, which might or might not be working..

Parent 8996d674
// Format of an ELF executable file
// From linux/include/linux/elf.h
#define ELF_MAGIC 0x464C457FU // "\x7FELF" in little endian
// File header
typedef u64 Elf64_Addr;
typedef u64 Elf64_Off;
typedef u16 Elf64_Half;
typedef u32 Elf64_Word;
typedef s32 Elf64_Sword;
typedef u64 Elf64_Xword;
typedef s64 Elf64_Sxword;
typedef s16 Elf64_Section;
struct elfhdr {
uint magic; // must equal ELF_MAGIC
uchar elf[12];
ushort type;
ushort machine;
uint version;
uint entry;
uint phoff;
uint shoff;
uint flags;
ushort ehsize;
ushort phentsize;
ushort phnum;
ushort shentsize;
ushort shnum;
ushort shstrndx;
Elf64_Word magic; // Must equal ELF_MAGIC
u8 ident[12];
Elf64_Half machine;
Elf64_Word version;
Elf64_Addr entry; // Entry point virtual address
Elf64_Off phoff; // Program header table file offset
Elf64_Off shoff; // Section header table file offset
Elf64_Word flags;
Elf64_Half ehsize;
Elf64_Half phentsize;
Elf64_Half phnum;
Elf64_Half shentsize;
Elf64_Half shnum;
Elf64_Half shstrndx;
};
// Program section header
struct proghdr {
uint type;
uint offset;
uint va;
uint pa;
uint filesz;
uint memsz;
uint flags;
uint align;
Elf64_Word type;
Elf64_Word flags;
Elf64_Off offset; // Segment file offset
Elf64_Addr vaddr; // Segment virtual address
Elf64_Addr paddr; // Segment physical address
Elf64_Xword filesz; // Segment size in file
Elf64_Xword memsz; // Segment size in memory
Elf64_Xword align; // Segment alignment, file & memory
};
// Values for Proghdr type
......
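As a reading aid, here is a minimal, hypothetical sketch (not part of this commit) of how the 64-bit structures above can be validated once an image sits in memory; the helper name elf64_check and the flat-buffer assumption are mine, while the exec() below performs the equivalent checks by reading the headers through the inode.

#include "types.h"
#include "elf.h"

// Sketch only: validate an ELF64 image that already sits in a flat buffer.
static int
elf64_check(const char *img, u64 len)
{
  const struct elfhdr *elf = (const struct elfhdr *) img;

  if(len < sizeof(*elf) || elf->magic != ELF_MAGIC)
    return -1;                                   // not an ELF file
  // The program header table must fit inside the image.
  if(elf->phoff + (u64) elf->phnum * elf->phentsize > len)
    return -1;
  for(int i = 0; i < elf->phnum; i++){
    const struct proghdr *ph = (const struct proghdr *)
        (img + elf->phoff + (u64) i * elf->phentsize);
    if(ph->memsz < ph->filesz)                   // same sanity check exec() makes
      return -1;
  }
  return 0;
}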
@@ -11,30 +11,29 @@
#include "stat.h"
#include "fs.h"
#include "file.h"
#if 0
#include "elf.h"
#endif
#include "cpu.h"
int
exec(char *path, char **argv)
{
panic("exec");
return 0;
#if 0
char *s, *last;
int i, off, brk = 0;
uint argc, sp, ustack[3+MAXARG+1];
struct inode *ip = NULL;
struct vmap *vmap = NULL;
struct vmnode *vmn = NULL;
struct elfhdr elf;
struct inode *ip = 0;
struct proghdr ph;
struct vmap *vmap = 0, *oldvmap;
struct vmnode *vmn = 0;
uptr brk = 0;
int odp = 1;
u64 off;
int i;
uptr sp;
int argc;
uptr ustack[3+MAXARG+1];
char *s, *last;
struct vmap *oldvmap;
if((ip = namei(path)) == 0)
return -1;
// ilock(ip, 0);
rcu_begin_read();
@@ -57,13 +56,13 @@ exec(char *path, char **argv)
continue;
if(ph.memsz < ph.filesz)
goto bad;
if(ph.va % PGSIZE) {
if(ph.vaddr % PGSIZE) {
cprintf("unaligned ph.va\n");
goto bad;
}
uint va_start = PGROUNDDOWN(ph.va);
uint va_end = PGROUNDUP(ph.va + ph.memsz);
uptr va_start = PGROUNDDOWN(ph.vaddr);
uptr va_end = PGROUNDUP(ph.vaddr + ph.memsz);
if(va_end > brk)
brk = va_end;
@@ -75,9 +74,10 @@ exec(char *path, char **argv)
if ((vmn = vmn_allocpg(npg)) == 0)
goto bad;
}
if(vmn_load(vmn, ip, ph.offset, ph.filesz) < 0)
goto bad;
if(vmap_insert(vmap, vmn, ph.va) < 0)
if(vmap_insert(vmap, vmn, ph.vaddr) < 0)
goto bad;
vmn = 0;
}
@@ -110,7 +110,7 @@ exec(char *path, char **argv)
if(argc >= MAXARG)
goto bad;
sp -= strlen(argv[argc]) + 1;
sp &= ~3;
sp &= ~7;
if(copyout(vmap, sp, argv[argc], strlen(argv[argc]) + 1) < 0)
goto bad;
ustack[3+argc] = sp;
@@ -119,41 +119,39 @@ exec(char *path, char **argv)
ustack[0] = 0xffffffff; // fake return PC
ustack[1] = argc;
ustack[2] = sp - (argc+1)*4; // argv pointer
ustack[2] = sp - (argc+1)*8; // argv pointer
sp -= (3+argc+1) * 4;
if(copyout(vmap, sp, ustack, (3+argc+1)*4) < 0)
sp -= (3+argc+1) * 8;
if(copyout(vmap, sp, ustack, (3+argc+1)*8) < 0)
goto bad;
// Save program name for debugging.
for(last=s=path; *s; s++)
if(*s == '/')
last = s+1;
safestrcpy(proc->name, last, sizeof(proc->name));
safestrcpy(myproc()->name, last, sizeof(myproc()->name));
// Commit to the user image.
oldvmap = proc->vmap;
proc->vmap = vmap;
proc->brk = brk + 4; // XXX so that brk-1 points within heap vma..
proc->tf->eip = elf.entry; // main
proc->tf->esp = sp;
switchuvm(proc);
oldvmap = myproc()->vmap;
myproc()->vmap = vmap;
myproc()->brk = brk + 8; // XXX so that brk-1 points within heap vma..
myproc()->tf->rip = elf.entry; // main
myproc()->tf->rsp = sp;
switchuvm(myproc());
vmap_decref(oldvmap);
migrate(proc);
migrate(myproc());
rcu_end_read();
return 0;
bad:
cprintf("exec failed\n");
// if(ip)
// iunlockput(ip);
if(vmap)
vmap_decref(vmap);
if(vmn)
vmn_free(vmn);
rcu_end_read();
return -1;
#endif
return 0;
}
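One thing the exec() hunk above changes throughout is the width of a user stack slot: 4 bytes become 8 (sp &= ~7, the *8 multipliers, and rsp/rip instead of esp/eip). The following hypothetical helper, written as plain host C rather than kernel code, rebuilds the same argument block into a local buffer so the layout is easy to see; push_args and the buffer destination are illustrative stand-ins for the copyout() calls in the diff.

#include <stdint.h>
#include <string.h>

// Sketch only: lay out exec()'s argument block with 8-byte slots.
// stack_top plays the role of sp before the final push; argstr holds the
// user addresses where the argument strings were already copied out.
static uint64_t
push_args(char *buf, uint64_t stack_top, int argc, const uint64_t *argstr)
{
  uint64_t ustack[3 + argc + 1];

  ustack[0] = 0xffffffff;                    // fake return PC
  ustack[1] = argc;
  ustack[2] = stack_top - (argc + 1) * 8;    // where argv[0] will land below
  for(int i = 0; i < argc; i++)
    ustack[3 + i] = argstr[i];
  ustack[3 + argc] = 0;                      // argv[] is NULL-terminated

  uint64_t sp = stack_top - (3 + argc + 1) * 8;
  memcpy(buf, ustack, (3 + argc + 1) * 8);   // stands in for copyout(vmap, sp, ...)
  return sp;                                 // value exec() would put in tf->rsp
}

With 8-byte slots, the argv value in ustack[2] points exactly at where ustack[3], the first argument pointer, lands after the block is copied, which is what the diff's ustack[2] = sp - (argc+1)*8 computes.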
@@ -720,6 +720,8 @@ namex(char *path, int nameiparent, char *name)
{
struct inode *ip, *next;
//cprintf("namex %s\n", path);
rcu_begin_read();
if(*path == '/')
ip = iget(ROOTDEV, ROOTINO);
......
@@ -2,6 +2,8 @@
#define KBASE 0xFFFFFFFF80000000ull
#define PBASE 0xFFFFFF0000000000ull
// XXX(sbw) arbitrary for right now..
#define USERTOP 0x0000000040000000ull
#define KCSEG (2<<3) /* kernel code segment */
#define KDSEG (3<<3) /* kernel data segment */
@@ -225,3 +227,5 @@ void vmn_free(struct vmnode *);
void switchuvm(struct proc*);
void switchkvm(void);
int pagefault(struct vmap *, uptr, u32);
void vmap_decref(struct vmap *);
int vmn_load(struct vmnode *, struct inode*, u64, u64);
@@ -94,6 +94,41 @@ delrun(struct proc *p)
p->on_runq = -1;
}
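// Try to move p to an idle cpu. The current cpu is skipped; if p is the
// running process itself, it is left RUNNABLE and sched() yields so p can
// be picked up on its new cpuid.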
void
migrate(struct proc *p)
{
int c;
for (c = 0; c < NCPU; c++) {
if (c == mycpu()->id)
continue;
if (idle[c]) { // OK if there is a race
acquire(&p->lock);
if (p->state != RUNNABLE || p->cpu_pin) {
release(&p->lock);
continue;
}
if (sched_debug)
cprintf("cpu%d: migrate %d to %d\n", mycpu()->id, p->pid, c);
delrun(p);
p->curcycles = 0;
p->cpuid = c;
addrun(p);
idle[c] = 0;
if (p == myproc()) {
myproc()->state = RUNNABLE;
sched();
}
release(&p->lock);
return;
}
}
}
// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
static void
......
@@ -7,12 +7,15 @@ typedef uint64 uintptr;
typedef uint8 u8;
typedef uint16 u16;
typedef short s16;
typedef uint32 u32;
typedef int s32;
typedef int64 s64;
typedef uint64 u64;
typedef uint64 uptr;
typedef uptr paddr;
typedef u64 pme_t; // Page Map Entry (refers to any entry in any level)
typedef pme_t pml4e_t;
......
@@ -145,6 +145,90 @@ vma_alloc(void)
return e;
}
static void
vmn_decref(struct vmnode *n)
{
if(subfetch(&n->ref, 1) == 0)
vmn_free(n);
}
static void
vma_free(void *p)
{
struct vma *e = (struct vma *) p;
if(e->n)
vmn_decref(e->n);
kmfree(e);
}
// Free a page table and all the physical memory pages
// in the user part.
static void
freevm(pml4e_t *pml4)
{
if(pml4 == 0)
panic("freevm: no pgdir");
cprintf("freevm: XXX leaking..\n");
#if 0
for(i = 0; i < 1024; i++){
if(pgdir[i] & PTE_P)
kfree(p2v(PTE_ADDR(pgdir[i])));
}
#endif
kfree(pml4);
}
#ifdef TREE
struct state {
int share;
void *pgdir;
struct node *root;
};
static int
vmap_free_vma(struct kv *kv, void *p)
{
struct state *st = (struct state *) p;
vma_free(kv->val);
st->root = tree_remove(st->root, kv->key);
return 1;
}
static void
vmap_free(void *p)
{
struct vmap *m = (struct vmap *) p;
struct state *st = kmalloc(sizeof(struct state));
st->root = m->root;
tree_foreach(m->root, vmap_free_vma, st);
m->root = st->root;
freevm(m->pgdir);
kmfree(st);
m->pgdir = 0;
m->alloc = 0;
}
#else
static void
vmap_free(void *p)
{
struct vmap *m = (struct vmap *) p;
for(u64 i = 0; i < NELEM(m->e); i++) {
if (m->e[i])
vma_free(m->e[i]);
}
freevm(m->pml4);
m->pml4 = 0;
m->alloc = 0;
}
#endif
void
vmap_decref(struct vmap *m)
{
if(subfetch(&m->ref, 1) == 0)
vmap_free(m);
}
// Does any vma overlap start..start+len?
// If yes, return the vma pointer.
// If no, return 0.
@@ -380,13 +464,6 @@ updatepages(pme_t *pml4, void *begin, void *end, int perm)
}
}
static void
vmn_decref(struct vmnode *n)
{
if(subfetch(&n->ref, 1) == 0)
vmn_free(n);
}
struct vmnode *
vmn_copy(struct vmnode *n)
{
@@ -513,6 +590,33 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
return 1;
}
// Load a program segment into a vmnode.
int
vmn_load(struct vmnode *vmn, struct inode *ip, u64 offset, u64 sz)
{
if (vmn->type == ONDEMAND) {
vmn->ip = ip;
vmn->offset = offset;
vmn->sz = sz;
return 0;
} else {
return vmn_doload(vmn, ip, offset, sz);
}
}
#if 0
void
printpgdir(pde_t *pgdir)
......