Commit 42ab8787, authored by Silas Boyd-Wickizer

Enough to compile and link inituser, but panics on addrun.

Parent 5628c3d8
@@ -40,10 +40,17 @@ CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 &
ASFLAGS = -m64 -gdwarf-2
LDFLAGS += -m elf_x86_64
kernel: boot.o $(OBJS)
$(LD) $(LDFLAGS) -T kernel.ld -z max-page-size=4096 -e start -o $@ boot.o $(OBJS)
kernel: boot.o $(OBJS) initcode
$(LD) $(LDFLAGS) -T kernel.ld -z max-page-size=4096 -e start \
-o $@ boot.o $(OBJS) -b binary initcode
$(OBJDUMP) -S $@ >$@.asm
initcode: initcode.S
$(CC) $(CFLAGS) -nostdinc -I. -c initcode.S
$(LD) $(LDFLAGS) -N -e start -Ttext 0 -o initcode.out initcode.o
$(OBJCOPY) -S -O binary initcode.out initcode
$(OBJDUMP) -S initcode.o > initcode.asm
xv6memfs.img: bootblock kernelmemfs
dd if=/dev/zero of=xv6memfs.img count=10000
dd if=bootblock of=xv6memfs.img conv=notrunc
...
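
For context on the link rule above: the "-b binary initcode" step makes GNU ld embed the raw initcode image in the kernel ELF and synthesize _binary_initcode_start, _binary_initcode_end, and _binary_initcode_size symbols for it (standard ld behavior for binary inputs). A minimal sketch of how C code can reach the embedded image; note that the length is the symbol's address, not a stored value:

// Synthesized by the "-b binary initcode" link step (GNU ld convention);
// these are addresses delimiting the embedded image, not variables.
extern char _binary_initcode_start[];
extern char _binary_initcode_end[];
extern char _binary_initcode_size[];

// The image occupies [_binary_initcode_start, _binary_initcode_end);
// its length is the address of _binary_initcode_size:
//   u64 sz = (u64)_binary_initcode_size;
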
@@ -15,6 +15,12 @@ namei(char *path)
return NULL;
}
void
iput(struct inode *ip)
{
panic("iput");
}
#if 0
// File system implementation. Four layers:
// + Blocks: allocator for raw disk blocks.
...
@@ -263,249 +263,8 @@ kmalloc(u64 nbytes)
return r;
}
#if 0
static void __attribute__((unused))
kmemprint(void)
{
cprintf("free pages: [ ");
for (uint i = 0; i < NCPU; i++)
if (i == cpu->id)
cprintf("<%d> ", kmems[i].nfree);
else
cprintf("%d ", kmems[i].nfree);
cprintf("]\n");
}
//PAGEBREAK: 21
// Free the page of physical memory pointed at by v,
// which normally should have been returned by a
// call to kalloc(). (The exception is when
// initializing the allocator; see kinit above.)
static void
kfree_pool(struct kmem *m, char *v)
{
struct run *r;
if((uint)v % PGSIZE || v < end || v2p(v) >= PHYSTOP) {
panic("kfree_pool");
}
// Fill with junk to catch dangling refs.
if (kinited && kalloc_memset)
memset(v, 1, PGSIZE);
acquire(&m->lock);
r = (struct run*)v;
r->next = m->freelist;
m->freelist = r;
m->nfree++;
if (kinited)
mtrace_label_register(mtrace_label_block,
r,
0,
0,
0,
RET_EIP());
release(&m->lock);
}
void
kfree(void *v)
{
kfree_pool(kmem, v);
}
// Initialize free list of physical pages.
void
kinit(void)
{
char *p;
for (int c = 0; c < NCPU; c++) {
kmems[c].name[0] = (char) c + '0';
safestrcpy(kmems[c].name+1, "kmem", MAXNAME-1);
initlock(&kmems[c].lock, kmems[c].name);
}
p = (char*)PGROUNDUP((uint)newend);
for(; p + PGSIZE <= (char*)p2v(PHYSTOP); p += PGSIZE) {
kfree_pool(&kmems[((uintptr_t) v2p(p)) / (PHYSTOP/NCPU)], p);
}
kminit();
kinited = 1;
}
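
kinit hands each page to a pool chosen purely by its physical address, so the NCPU pools split physical memory into equal contiguous slices. A standalone instance of that index computation, using illustrative constants rather than the kernel's actual ones:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  // Illustrative values only: 256 MB of physical memory, 4 CPUs,
  // so each pool owns a contiguous 64 MB slice.
  uint64_t phystop = 0x10000000;
  uint64_t ncpu = 4;
  uint64_t pa = 0x5000000;  // physical address of some page (80 MB)
  // Same expression as kinit's kmems[v2p(p) / (PHYSTOP/NCPU)]:
  printf("page at %#lx -> pool %lu\n",
         (unsigned long)pa, (unsigned long)(pa / (phystop / ncpu)));
  return 0;  // prints: page at 0x5000000 -> pool 1
}
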
// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
char*
kalloc(void)
{
struct run *r = 0;
// cprintf("%d: kalloc 0x%x 0x%x 0x%x 0x%x 0%x\n", cpu->id, kmem, &kmems[cpu->id], kmem->freelist, PHYSTOP, kmems[1].freelist);
uint startcpu = cpu->id;
for (uint i = 0; r == 0 && i < NCPU; i++) {
int cn = (i + startcpu) % NCPU;
struct kmem *m = &kmems[cn];
acquire(&m->lock);
r = m->freelist;
if (r) {
m->freelist = r->next;
m->nfree--;
}
release(&m->lock);
}
if (r == 0) {
cprintf("kalloc: out of memory\n");
kmemprint();
return 0;
}
mtrace_label_register(mtrace_label_block,
r,
4096,
"kalloc",
sizeof("kalloc"),
RET_EIP());
if (kalloc_memset)
memset(r, 2, PGSIZE);
return (char*)r;
}
// Memory allocator by Kernighan and Ritchie,
// The C programming Language, 2nd ed. Section 8.7.
typedef struct header {
struct header *ptr;
uint size; // in multiples of sizeof(Header)
} __attribute__ ((aligned (CACHELINE))) Header;
static struct freelist {
Header base;
Header *freep; // last allocated block
struct spinlock lock;
char name[MAXNAME];
} freelists[NCPU];
void
kminit(void)
{
for (int c = 0; c < NCPU; c++) {
freelists[c].name[0] = (char) c + '0';
safestrcpy(freelists[c].name+1, "freelist", MAXNAME-1);
initlock(&freelists[c].lock, freelists[c].name);
}
}
static void
domfree(void *ap)
{
Header *bp, *p;
bp = (Header*)ap - 1;
if (kalloc_memset)
memset(ap, 3, (bp->size-1) * sizeof(*bp));
for(p = freelists[cpu->id].freep; !(bp > p && bp < p->ptr); p = p->ptr)
if(p >= p->ptr && (bp > p || bp < p->ptr))
break;
if(bp + bp->size == p->ptr){
bp->size += p->ptr->size;
bp->ptr = p->ptr->ptr;
} else
bp->ptr = p->ptr;
if(p + p->size == bp){
p->size += bp->size;
p->ptr = bp->ptr;
} else
p->ptr = bp;
freelists[cpu->id].freep = p;
}
void
kmfree(void *ap)
{
acquire(&freelists[cpu->id].lock);
domfree(ap);
mtrace_label_register(mtrace_label_heap,
ap,
0,
0,
0,
RET_EIP());
release(&freelists[cpu->id].lock);
}
// Caller should hold the freelist lock.
static Header*
morecore(uint nu)
{
char *p;
Header *hp;
static uint units_per_page = PGSIZE / sizeof(Header);
if(nu != units_per_page) {
if (nu > units_per_page)
panic("morecore");
nu = units_per_page; // we allocate nu * sizeof(Header)
}
p = kalloc();
if(p == 0)
return 0;
hp = (Header*)p;
hp->size = nu;
domfree((void*)(hp + 1));
return freelists[cpu->id].freep;
}
void*
kmalloc(uint nbytes)
{
Header *p, *prevp;
uint nunits;
void *r = 0;
acquire(&freelists[cpu->id].lock);
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
if((prevp = freelists[cpu->id].freep) == 0){
freelists[cpu->id].base.ptr = freelists[cpu->id].freep = prevp = &freelists[cpu->id].base;
freelists[cpu->id].base.size = 0;
}
for(p = prevp->ptr; ; prevp = p, p = p->ptr){
if(p->size >= nunits){
if(p->size == nunits)
prevp->ptr = p->ptr;
else {
p->size -= nunits;
p += p->size;
p->size = nunits;
}
freelists[cpu->id].freep = prevp;
r = (void*)(p + 1);
break;
}
if(p == freelists[cpu->id].freep)
if((p = morecore(nunits)) == 0)
break;
}
release(&freelists[cpu->id].lock);
if (r)
mtrace_label_register(mtrace_label_heap,
r,
nbytes,
"kmalloc'ed",
sizeof("kmalloc'ed"),
RET_EIP());
return r;
kfree_pool(mykmem(), v);
}
#endif
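
An aside on the disabled K&R allocator above: kmalloc rounds every request up to whole Header units, plus one extra unit for the header itself, which is how kmfree later recovers the block size from the pointer alone. A standalone instance of that computation; the 64-byte Header size is an assumption implied by the CACHELINE alignment, not a value from this commit:

#include <stdio.h>

int main(void)
{
  // Assumed: sizeof(Header) == 64 due to the CACHELINE alignment.
  unsigned long hdr = 64;
  unsigned long nbytes = 100;
  // Same rounding as kmalloc: payload units, plus one for the header.
  unsigned long nunits = (nbytes + hdr - 1) / hdr + 1;
  printf("%lu bytes -> %lu units = %lu bytes\n",
         nbytes, nunits, nunits * hdr);  // 100 bytes -> 3 units = 192 bytes
  return 0;
}
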
@@ -9,10 +9,13 @@
static inline uptr v2p(void *a) { return (uptr) a - KBASE; }
static inline void *p2v(uptr a) { return (void *) a + KBASE; }
#define NELEM(x) (sizeof(x)/sizeof((x)[0]))
struct spinlock;
struct condvar;
struct proc;
struct vmnode;
struct inode;
struct proc;
struct vmap;
// bio.c
@@ -37,6 +40,7 @@ void snprintf(char *buf, u32 n, char *fmt, ...);
// fs.c
int namecmp(const char*, const char*);
struct inode* namei(char*);
void iput(struct inode*);
// ide.c
void ideinit(void);
@@ -151,6 +155,9 @@ void uartputc(char c);
// vm.c
struct vmap * vmap_alloc(void);
struct vmnode* vmn_allocpg(u64 npg);
int vmap_insert(struct vmap *, struct vmnode *n, uptr);
struct vmnode* vmn_alloc(u64, u32);
struct vmnode* vmn_allocpg(u64);
int vmap_insert(struct vmap*, struct vmnode *, uptr);
struct vma * vmap_lookup(struct vmap*, uptr, uptr);
int copyout(struct vmap *, uptr, void*, u64);
void vmn_free(struct vmnode *);
#include "spinlock.h"
// A mapping of a chunk of an address space to
// a specific memory object.
enum vmatype { PRIVATE, COW};
struct vma {
uptr va_start; // start of mapping
uptr va_end; // one past the last byte
enum vmatype va_type;
struct vmnode *n;
struct spinlock lock; // serialize fault/unmap
char lockname[16];
};
// A memory object (physical pages or inode).
enum vmntype { EAGER, ONDEMAND};
struct vmnode {
u64 npages;
char *page[128];
u64 ref;
enum vmntype type;
struct inode *ip;
u64 offset;
u64 sz;
};
// An address space: a set of vmas plus h/w page table.
// The elements of e[] are not ordered by address.
struct vmap {
#ifdef TREE
struct node* root;
#else
struct vma* e[16];
#endif
struct spinlock lock; // serialize map/lookup/unmap
u64 ref;
u64 alloc;
pml4e_t *pml4; // Page table
char lockname[16];
};
// Saved registers for kernel context switches.
struct context {
...
@@ -8,6 +8,9 @@
#include "bits.h"
#include "spinlock.h"
#include "kalloc.h"
#include "queue.h"
#include "condvar.h"
#include "proc.h"
extern char data[]; // defined in data.S
@@ -88,6 +91,230 @@ initseg(void)
c->kmem = &kmems[cpunum()];
}
// Set up kernel part of a page table.
static pml4e_t*
setupkvm(void)
{
pml4e_t *pml4;
if((pml4 = (pml4e_t*)kalloc()) == 0)
return 0;
memmove(pml4, kpml4, PGSIZE);
return pml4;
}
static struct vma *
vma_alloc(void)
{
struct vma *e = kmalloc(sizeof(struct vma));
if (e == 0)
return 0;
memset(e, 0, sizeof(struct vma));
e->va_type = PRIVATE;
snprintf(e->lockname, sizeof(e->lockname), "vma:%p", e);
initlock(&e->lock, e->lockname);
return e;
}
// Does any vma overlap start..start+len?
// If yes, return the vma pointer.
// If no, return 0.
// This code can't handle regions at the very end
// of the address space, e.g. 0xffffffff..0x0.
// We key vmas by their end address.
struct vma *
vmap_lookup(struct vmap *m, uptr start, uptr len)
{
if(start + len < start)
panic("vmap_lookup bad len");
#ifdef TREE
struct kv *kv = tree_find_gt(m->root, start); // find vma with va_end > start
if (kv != 0) {
struct vma *e = (struct vma *) (kv->val);
if (e->va_end <= e->va_start)
panic("malformed va");
if (e->va_start < start+len && e->va_end > start) {
return e;
}
}
#else
for(u64 i = 0; i < NELEM(m->e); i++){
struct vma *e = m->e[i];
if(e) {
if(e->va_end <= e->va_start) // XXX shouldn't this involve start and len?
panic("vmap_lookup bad vma");
if(e->va_start < start+len && e->va_end > start)
return e;
}
}
#endif
return 0;
}
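
The lookup hinges on the standard half-open interval test: [s1, e1) and [s2, e2) intersect exactly when each starts before the other ends. Because vmas are keyed by va_end, the first node with va_end > start is the only tree candidate that can pass it. A standalone sketch of the predicate:

#include <stdio.h>

// Half-open intervals [s1,e1) and [s2,e2) overlap iff each starts
// before the other ends -- the same test vmap_lookup applies.
static int
overlaps(unsigned long s1, unsigned long e1,
         unsigned long s2, unsigned long e2)
{
  return s1 < e2 && s2 < e1;
}

int main(void)
{
  printf("%d\n", overlaps(0x1000, 0x3000, 0x2000, 0x4000));  // 1: overlap
  printf("%d\n", overlaps(0x1000, 0x2000, 0x2000, 0x3000));  // 0: adjacent
  return 0;
}
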
struct vmap *
vmap_alloc(void)
{
struct vmap *m = kmalloc(sizeof(struct vmap));
if (m == 0)
return 0;
memset(m, 0, sizeof(struct vmap));
snprintf(m->lockname, sizeof(m->lockname), "vmap:%p", m);
initlock(&m->lock, m->lockname);
m->ref = 1;
m->pml4 = setupkvm();
if (m->pml4 == 0) {
cprintf("vmap_alloc: setupkvm out of memory\n");
kmfree(m);
return 0;
}
return m;
}
int
vmap_insert(struct vmap *m, struct vmnode *n, uptr va_start)
{
acquire(&m->lock);
u64 len = n->npages * PGSIZE;
if(vmap_lookup(m, va_start, len)){
cprintf("vmap_insert: overlap\n");
release(&m->lock);
return -1;
}
#ifdef TREE
struct vma *e = vma_alloc();
struct kv kv;
if (e == 0) {
release(&m->lock);
return -1;
}
e->va_start = va_start;
e->va_end = va_start + len;
e->n = n;
__sync_fetch_and_add(&n->ref, 1);
kv.key = e->va_end;
kv.val = e;
m->root = tree_insert(m->root, &kv);
release(&m->lock);
return 0;
#else
for(u64 i = 0; i < NELEM(m->e); i++) {
if(m->e[i])
continue;
m->e[i] = vma_alloc();
if (m->e[i] == 0)
return -1;
m->e[i]->va_start = va_start;
m->e[i]->va_end = va_start + len;
m->e[i]->n = n;
__sync_fetch_and_add(&n->ref, 1);
release(&m->lock);
return 0;
}
release(&m->lock);
cprintf("vmap_insert: out of vma slots\n");
return -1;
#endif
}
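
A hedged usage sketch of the insert path (assumed kernel-side code, not from this commit): vmap_insert refuses overlapping ranges and takes a reference on the vmnode with __sync_fetch_and_add, so a node shared by several vmas stays alive while any of them points at it.

// Sketch only: map two zeroed 4 KB pages at an illustrative address.
struct vmap *m = vmap_alloc();
struct vmnode *n = vmn_allocpg(2);
if(m == 0 || n == 0)
  panic("demo: out of memory");
if(vmap_insert(m, n, 0x1000) < 0)    // takes a reference on n
  panic("demo: vmap_insert");
if(vmap_insert(m, n, 0x1000) < 0)    // same range again: rejected
  cprintf("second insert overlaps, as expected\n");
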
struct vmnode *
vmn_alloc(u64 npg, enum vmntype type)
{
struct vmnode *n = kmalloc(sizeof(struct vmnode));
if (n == 0) {
cprintf("out of vmnodes");
return 0;
}
if(npg > NELEM(n->page)) {
panic("vmnode too big\n");
}
memset(n, 0, sizeof(struct vmnode));
n->npages = npg;
n->type = type;
return n;
}
static int
vmn_doallocpg(struct vmnode *n)
{
for(u64 i = 0; i < n->npages; i++) {
if((n->page[i] = kalloc()) == 0)
return -1;
memset((char *) n->page[i], 0, PGSIZE);
}
return 0;
}
struct vmnode *
vmn_allocpg(u64 npg)
{
struct vmnode *n = vmn_alloc(npg, EAGER);
if (n == 0) return 0;
if (vmn_doallocpg(n) < 0) {
vmn_free(n);
return 0;
}
return n;
}
void
vmn_free(struct vmnode *n)
{
for(u64 i = 0; i < n->npages; i++) {
if (n->page[i]) {
kfree(n->page[i]);
n->page[i] = 0;
}
}
if (n->ip)
iput(n->ip);
n->ip = 0;
kmfree(n);
}
// Copy len bytes from p to user address va in vmap.
// Most useful when vmap is not the current page table.
int
copyout(struct vmap *vmap, uptr va, void *p, u64 len)
{
char *buf = (char*)p;
while(len > 0){
uptr va0 = (uptr)PGROUNDDOWN(va);
rcu_begin_read();
struct vma *vma = vmap_lookup(vmap, va, 1);
if(vma == 0) {
rcu_end_read();
return -1;
}
acquire(&vma->lock);
uptr pn = (va0 - vma->va_start) / PGSIZE;
char *p0 = vma->n->page[pn];
if(p0 == 0)
panic("copyout: missing page");
uptr n = PGSIZE - (va - va0);
if(n > len)
n = len;
memmove(p0 + (va - va0), buf, n);
len -= n;
buf += n;
va = va0 + PGSIZE;
release(&vma->lock);
rcu_end_read();
}
return 0;
}
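
Tying the commit together: a hedged sketch (assumed code, not from this commit) of how an inituser-style path could stage the embedded initcode image into a fresh address space using the routines above. The _binary_initcode_* symbols come from the Makefile's -b binary link step; the function name is hypothetical:

extern char _binary_initcode_start[];  // from "ld ... -b binary initcode"
extern char _binary_initcode_size[];

// Hypothetical: build an address space with initcode mapped at va 0.
static struct vmap*
demo_inituser_vmap(void)
{
  u64 sz = (u64)_binary_initcode_size;
  struct vmap *vmap = vmap_alloc();
  struct vmnode *vmn = vmn_allocpg(PGROUNDUP(sz) / PGSIZE);
  if(vmap == 0 || vmn == 0)
    panic("demo_inituser_vmap: out of memory");
  if(vmap_insert(vmap, vmn, 0) < 0)    // map the pages at user va 0
    panic("demo_inituser_vmap: vmap_insert");
  if(copyout(vmap, 0, _binary_initcode_start, sz) < 0)
    panic("demo_inituser_vmap: copyout");
  return vmap;
}
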
#if 0
void
printpgdir(pde_t *pgdir)
...