Commit c48f7666 authored by Silas Boyd-Wickizer

Merge

......@@ -10,3 +10,4 @@ o.*
/mscan.kern
config.mk
lwip
/.passwd.*
......@@ -111,5 +111,8 @@ mscan.sorted: mscan.out $(QEMUSRC)/mtrace-tools/sersec-sort
rsync: $(KERN)
rsync -avP $(KERN) amsterdam.csail.mit.edu:/tftpboot/$(HW)/kernel.xv6
reboot:
ipmitool -I lanplus -A MD5 -H $(HW)adm.csail.mit.edu -U ADMIN -f .passwd.$(HW) power reset
clean:
rm -fr $(O)
......@@ -9,7 +9,7 @@
* Networking with lwIP
$ git clone git://git.savannah.nongnu.org/lwip.git
$ (cd lwip && git checkout STABLE-1_4_0)
$ (cd lwip && git checkout DEVEL-1_4_1)
$ make clean;make
* Copies of e1000 and e1000e manuals are in manuals/. They were
......@@ -40,6 +40,7 @@
mkdir build
cd build
../configure --target=x86_64-jos-elf --with-gmp=/usr/local/ --with-mpfr=/usr/local --enable-languages=c,c++ --without-headers --disable-nls
make all-gcc ; sudo make install-gcc
* clang
- Version 3.0 or greater is required to build xv6
......
## Save the IPMI password in ./.passwd.josmp
## To build and run xv6 on josmp:
make HW=josmp
make HW=josmp rsync
ssh root@pdos ed /tftpboot/josmp/pxelinux.cfg <<EOM
/^default /s/^/#/
/^# *default xv6/s/^# *//
......@@ -10,7 +8,8 @@ wq
EOM
ssh root@josmp reboot
ipmitool -I lanplus -A MD5 -H josmpadm.csail.mit.edu -U ADMIN power reset
make HW=josmp rsync reboot
---
......@@ -22,5 +21,5 @@ ssh root@pdos ed /tftpboot/josmp/pxelinux.cfg <<EOM
wq
EOM
ipmitool -I lanplus -A MD5 -H josmpadm.csail.mit.edu -U ADMIN power reset
make HW=josmp reboot
......@@ -7,30 +7,31 @@
static struct uspinlock l;
static volatile int tcount;
enum { readaccess = 1 };
enum { readaccess = 0 };
enum { verbose = 0 };
enum { npg = 1 };
void
thr(void *arg)
{
u64 tid = (u64)arg;
for (int i = 0; i < 10000; i++) {
for (int i = 0; i < 1000000; i++) {
if (verbose && ((i % 100) == 0))
fprintf(1, "%d: %d ops\n", tid, i);
volatile char *p = (char*) (0x40000UL + tid * 8 * 4096);
if (map((void *) p, 8 * 4096) < 0) {
volatile char *p = (char*) (0x40000UL + tid * npg * 4096);
if (map((void *) p, npg * 4096) < 0) {
fprintf(1, "%d: map failed\n", tid);
exit();
}
if (readaccess) {
for (int j = 0; j < 8 * 4096; j++)
for (int j = 0; j < npg * 4096; j++)
p[j] = '\0';
}
if (unmap((void *) p, 8 * 4096) < 0) {
if (unmap((void *) p, npg * 4096) < 0) {
fprintf(1, "%d: unmap failed\n", tid);
exit();
}
......
#include "gc.hh"
#include "atomic.hh"
#include "cpputil.hh"
using std::atomic;
struct buf : public rcu_freed {
atomic<int> flags;
u32 dev;
u64 sector;
const u32 dev;
const u64 sector;
struct buf *prev; // LRU cache list
struct buf *next;
struct buf *qnext; // disk queue
......@@ -15,8 +16,19 @@ struct buf : public rcu_freed {
struct spinlock lock;
u8 data[512];
buf() : rcu_freed("buf") {}
buf(u32 d, u64 s) : rcu_freed("buf"), dev(d), sector(s) {
snprintf(lockname, sizeof(lockname), "cv:buf:%d", sector);
initlock(&lock, lockname+3, LOCKSTAT_BIO);
initcondvar(&cv, lockname);
}
~buf() {
destroycondvar(&cv);
destroylock(&lock);
}
virtual void do_gc() { delete this; }
NEW_DELETE_OPS(buf)
};
#define B_BUSY 0x1 // buffer is locked by some process
#define B_VALID 0x2 // buffer has been read from disk
......
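A note on the buf constructor above: one buffer names both synchronization objects. lockname holds a string like "cv:buf:7"; the condvar gets the whole string, while lockname+3 skips the "cv:" prefix so the spinlock is named "buf:7". The proc constructor later in this commit uses the identical trick with "cv:proc:%d". A standalone illustration (the sector value is hypothetical):

  #include <cstdio>

  int main() {
    char lockname[16];
    int sector = 7;
    std::snprintf(lockname, sizeof(lockname), "cv:buf:%d", sector);
    std::printf("condvar name:  %s\n", lockname);      // prints "cv:buf:7"
    std::printf("spinlock name: %s\n", lockname + 3);  // prints "buf:7"
    return 0;
  }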
......@@ -67,8 +67,20 @@ namespace std {
}
/* C++ runtime */
void *operator new(unsigned long nbytes);
void *operator new(unsigned long nbytes, void *buf);
void operator delete(void *p);
// void *operator new(unsigned long nbytes);
// void *operator new(unsigned long nbytes, void *buf);
void *operator new[](unsigned long nbytes);
// void operator delete(void *p);
void operator delete[](void *p);
extern "C" void __cxa_pure_virtual(void);
#define NEW_DELETE_OPS(classname) \
static void* operator new(unsigned long nbytes) { \
assert(nbytes == sizeof(classname)); \
return kmalloc(sizeof(classname)); \
} \
\
static void operator delete(void *p) { \
return kmfree(p, sizeof(classname)); \
}
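The NEW_DELETE_OPS macro above is the backbone of this commit: kmfree() now takes the allocation size, so each kernel class routes new/delete through class-scope operators that know sizeof(classname) statically and never need a size header on the allocation. A minimal user-space sketch of the same pattern, with malloc standing in for kmalloc and a hypothetical sized_free standing in for kmfree:

  #include <cassert>
  #include <cstddef>
  #include <cstdlib>

  // Stand-ins for the kernel allocator; kmfree's analogue takes the size.
  static void *sized_alloc(std::size_t n) { return std::malloc(n); }
  static void sized_free(void *p, std::size_t n) { (void) n; std::free(p); }

  #define NEW_DELETE_OPS(classname)                    \
    static void *operator new(std::size_t nbytes) {    \
      assert(nbytes == sizeof(classname));             \
      return sized_alloc(sizeof(classname));           \
    }                                                  \
    static void operator delete(void *p) {             \
      sized_free(p, sizeof(classname));                \
    }

  struct demo {
    int x;
    NEW_DELETE_OPS(demo)
  };

  int main() {
    demo *d = new demo();  // dispatches to demo::operator new(sizeof(demo))
    delete d;              // dispatches to demo::operator delete, size known
    return 0;
  }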
#pragma once
#include "mmu.h"
#include "atomic.hh"
......
......@@ -100,15 +100,22 @@ struct range : public rcu_freed {
protected:
~range();
range(crange *cr, u64 k, u64 sz, int nlevel = 0);
public:
range(crange *cr, u64 k, u64 sz, int nlevel = 0);
bool deleted() { return next[0].mark(); }
} __mpalign__;
struct range_head : public range {
range_head(crange *cr, u64 k, u64 sz, int nlevel)
: range(cr, k, sz, nlevel) {}
NEW_DELETE_OPS(range_head)
virtual void do_gc() {
delete this;
}
} __mpalign__;
};
class range_iterator {
private:
......@@ -128,7 +135,7 @@ class crange_locked;
struct crange {
private:
const int nlevel; // number of levels in the crange skip list
range *const crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
range_head *const crange_head; // a crange skip list starts with a sentinel range (key 0, sz 0)
static void mark(range *f, range *s);
static int lock_range(u64 k, u64 sz, int l, range **er, range **pr, range **fr, range **lr, range **sr);
......@@ -151,6 +158,7 @@ struct crange {
range_iterator begin() const { return range_iterator(crange_head->next[0].ptr()); };
range_iterator end() const { return range_iterator(0); };
NEW_DELETE_OPS(crange)
};
static inline range_iterator
......
......@@ -6,3 +6,4 @@
#include "atomic.hh"
#include "proc.hh"
#include "cpu.hh"
#include "cpputil.hh"
......@@ -42,6 +42,7 @@ struct inode : public rcu_freed {
inode();
~inode();
virtual void do_gc() { delete this; }
NEW_DELETE_OPS(inode)
};
#define I_BUSYR 0x1
......
#pragma once
#define RCU_TYPE_DEBUG 1
class rcu_freed {
public:
u64 _rcu_epoch;
......
#include "atomic.hh"
struct run {
struct run *next;
};
......@@ -6,9 +8,8 @@ struct kmem {
char name[MAXNAME];
u64 size;
u64 ninit;
struct spinlock lock;
struct run *freelist;
u64 nfree;
std::atomic<run*> freelist;
std::atomic<u64> nfree;
} __mpalign__;
enum {
......
......@@ -46,6 +46,7 @@ extern u64 ticks;
extern struct spinlock tickslock;
extern struct condvar cv_ticks;
void initcondvar(struct condvar *, const char *);
void destroycondvar(struct condvar *);
void cv_sleep(struct condvar *cv, struct spinlock*);
void cv_sleepto(struct condvar *cv, struct spinlock*, u64);
void cv_wakeup(struct condvar *cv);
......@@ -118,12 +119,13 @@ char* kalloc(void);
void kfree(void*);
void* ksalloc(int slabtype);
void ksfree(int slabtype, void*);
void* kmalloc(u64);
void kmfree(void*);
void* kmalloc(u64 nbytes);
void kmfree(void*, u64 nbytes);
int kmalign(void **p, int align, u64 size);
void kmalignfree(void *);
void kmalignfree(void *, int align, u64 size);
void verifyfree(char *ptr, u64 nbytes);
void kminit(void);
void kmemprint(void);
// kbd.c
void kbdintr(void);
......@@ -206,7 +208,7 @@ int kmemcpy(void*, void*, u64);
void syscall(void);
// string.c
int memcmp(const void*, const void*, u32);
extern "C" int memcmp(const void*, const void*, u32);
void* memmove(void*, const void*, u32);
extern "C" void* memset(void*, int, u32);
extern "C" void* memcpy(void*, const void *, u32);
......
......@@ -18,7 +18,12 @@
#define NELEM(x) (sizeof(x)/sizeof((x)[0]))
#if 0
#define __offsetof offsetof
#else
#define __offsetof(type, field) (((uptr) &((type*)0x1000000)->field)-0x1000000)
#endif
#define __mpalign__ __attribute__((aligned(CACHELINE)))
#define __padout__ char __padout[0] __attribute__((aligned(CACHELINE)))
#define __noret__ __attribute__((noreturn))
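Context for the __offsetof replacement above (my reading; the diff itself does not explain it): offsetof is only guaranteed for standard-layout types, and C++ compilers diagnose it under -Winvalid-offsetof for the kernel structs that now have constructors and virtual functions. Computing the offset from a fake non-null base pointer avoids the diagnostic, and using 0x1000000 rather than address 0 also steers clear of the null-dereference form of the idiom. An illustration with a hypothetical non-POD struct:

  #include <cstdio>

  typedef unsigned long uptr;
  #define __offsetof(type, field) \
    (((uptr) &((type*)0x1000000)->field)-0x1000000)

  struct nonpod {
    nonpod();   // a user-declared constructor makes offsetof() complain in C++
    int a;
    long b;
  };

  int main() {
    // nonpod is never constructed; only its layout is inspected
    std::printf("offset of b: %lu\n", __offsetof(nonpod, b));  // typically 8
    return 0;
  }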
......@@ -28,6 +28,9 @@ struct klockstat : public rcu_freed {
klockstat(const char *name);
virtual void do_gc() { delete this; }
static void* operator new(unsigned long nbytes);
static void operator delete(void *p);
};
#else
struct klockstat;
......
......@@ -20,7 +20,11 @@ class xelem : public rcu_freed {
K key;
xelem(const K &k, const V &v) : rcu_freed("xelem"), val(v), next_lock(0), next(0), key(k) {}
virtual void do_gc() { delete this; }
virtual void do_gc() {
delete this;
}
NEW_DELETE_OPS(xelem)
};
template<class K, class V>
......@@ -215,6 +219,8 @@ class xns : public rcu_freed {
iterator end() {
return iterator();
}
NEW_DELETE_OPS(xns)
};
template<class K, class V, u64 (*HF)(const K&)>
......
#pragma once
#include "spinlock.h"
#include "atomic.hh"
#include "cpputil.hh"
// Saved registers for kernel context switches.
// (also implicitly defined in swtch.S)
......@@ -34,7 +37,7 @@ struct mtrace_stacks {
enum procstate { EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
// Per-process state
struct proc {
struct proc : public rcu_freed {
struct vmap *vmap; // va -> vma
uptr brk; // Top of heap
char *kstack; // Bottom of kernel stack for this process
......@@ -71,6 +74,33 @@ struct proc {
u64 cv_wakeup; // Wakeup time for this process
LIST_ENTRY(proc) cv_waiters; // Linked list of processes waiting for oncv
LIST_ENTRY(proc) cv_sleep; // Linked list of processes sleeping on a cv
proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0), epoch_depth(0),
on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
{
snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
initlock(&lock, lockname+3, LOCKSTAT_PROC);
initlock(&gc_epoch_lock, lockname+3, LOCKSTAT_PROC);
initcondvar(&cv, lockname);
memset(&childq, 0, sizeof(childq));
memset(&child_next, 0, sizeof(child_next));
memset(ofile, 0, sizeof(ofile));
memset(&runqlink, 0, sizeof(runqlink));
memset(&cv_waiters, 0, sizeof(cv_waiters));
memset(&cv_sleep, 0, sizeof(cv_sleep));
}
~proc() {
destroylock(&lock);
destroylock(&gc_epoch_lock);
destroycondvar(&cv);
}
virtual void do_gc() { delete this; }
NEW_DELETE_OPS(proc)
};
static inline void
......
#include "gc.hh"
#include "atomic.hh"
#include "crange_arch.hh"
#include "crange.hh"
#include "cpputil.hh"
#include "hwvm.hh"
......@@ -26,6 +27,7 @@ struct vmnode {
vmnode* copy();
int demand_load();
NEW_DELETE_OPS(vmnode)
};
// A mapping of a chunk of an address space to
......@@ -42,6 +44,7 @@ struct vma : public range {
~vma();
virtual void do_gc() { delete this; }
NEW_DELETE_OPS(vma)
};
// An address space: a set of vmas plus h/w page table.
......@@ -65,6 +68,7 @@ struct vmap {
int pagefault(uptr va, u32 err);
int copyout(uptr va, void *p, u64 len);
NEW_DELETE_OPS(vmap)
private:
int pagefault_wcow(vma *m);
......
......@@ -100,20 +100,13 @@ bget(u32 dev, u64 sector, int *writer)
victim->flags |= B_BUSY;
bufns->remove(mkpair(victim->dev, victim->sector), &victim);
release(&victim->lock);
destroylock(&victim->lock);
gc_delayed(victim);
b = new buf();
b->dev = dev;
b->sector = sector;
b = new buf(dev, sector);
b->flags = B_BUSY;
*writer = 1;
snprintf(b->lockname, sizeof(b->lockname), "cv:buf:%d", b->sector);
initlock(&b->lock, b->lockname+3, LOCKSTAT_BIO);
initcondvar(&b->cv, b->lockname);
gc_begin_epoch();
if (bufns->insert(mkpair(b->dev, b->sector), b) < 0) {
destroylock(&b->lock);
gc_delayed(b);
goto loop;
}
......@@ -169,12 +162,9 @@ initbio(void)
bufns = new xns<pair<u32, u64>, buf*, bio_hash>(false);
for (u64 i = 0; i < NBUF; i++) {
struct buf *b = new buf();
b->dev = 0xdeadbeef;
b->sector = -i; /* dummy to pre-allocate NBUF spaces for evict */
struct buf *b = new buf(0xdeadbeef, -i);
/* dummy to pre-allocate NBUF spaces for evict */
b->flags = 0;
initlock(&b->lock, "bcache-lock", LOCKSTAT_BIO);
initcondvar(&b->cv, "bcache-cv");
if (bufns->insert(mkpair(b->dev, b->sector), b) < 0)
panic("binit ns_insert");
}
......
#include "types.h"
#include "mmu.h"
#include "spinlock.h"
#include "kalloc.h"
#include "syscall.h"
#include "kern_c.h"
......@@ -27,24 +26,6 @@ struct segdesc __attribute__((aligned(16))) bootgdt[NSEGS] = {
[7]=SEGDESC(0, 0, SEG_R|SEG_CODE|SEG_S|SEG_DPL(3)|SEG_P|SEG_L|SEG_G),
};
struct kmem slabmem[slab_type_max][NCPU] = {
[slab_stack][0 ... NCPU-1] = {
.name = " kstack",
.size = KSTACKSIZE,
.ninit = CPUKSTACKS,
},
[slab_perf][0 ... NCPU-1] = {
.name = " kperf",
.size = PERFSIZE,
.ninit = 1,
},
[slab_kshared][0 ... NCPU-1] = {
.name = " kshared",
.size = KSHAREDSIZE,
.ninit = CPUKSTACKS,
},
};
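A plausible reason this static table is deleted (the commit does not say so explicitly): struct kmem gains std::atomic members in this commit, so its definition has to move into C++ code, and the C++ front end rejects both the designated initializers and GCC's [0 ... NCPU-1] range designators used here. The replacement is plain runtime initialization in initkalloc(), in a later hunk. For reference, the extension being given up (GNU C only, not valid C++):

  /* gcc -std=gnu99: a range designator initializes a slice of an array */
  struct counter { char name[8]; int size; };

  struct counter counters[8] = {
    [0 ... 7] = { .name = "demo", .size = 4096 },
  };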
#define SYSCALL(name) [SYS_##name] = (void*)sys_##name
long (*syscalls[])(u64, u64, u64, u64, u64, u64) = {
......
......@@ -138,3 +138,9 @@ initcondvar(struct condvar *cv, const char *n)
initlock(&cv->lock, n, LOCKSTAT_CONDVAR);
LIST_INIT(&cv->waiters);
}
void
destroycondvar(struct condvar *cv)
{
destroylock(&cv->lock);
}
......@@ -282,6 +282,9 @@ consoleintr(int (*getc)(void))
profenable = 0;
cprintf("prof disabled\n");
break;
case C('F'): // kmem stats
kmemprint();
break;
default:
if(c != 0 && input.e-input.r < INPUT_BUF){
c = (c == '\r') ? '\n' : c;
......
......@@ -3,33 +3,19 @@
#include "cpputil.hh"
-void *
-operator new(unsigned long nbytes)
-{
-  return kmalloc(nbytes);
-}
-
-void *
-operator new(unsigned long nbytes, void *buf)
-{
-  return buf;
-}
-
 void *
 operator new[](unsigned long nbytes)
 {
-  return kmalloc(nbytes);
-}
-
-void
-operator delete(void *p)
-{
-  kmfree(p);
+  u64 *x = (u64*) kmalloc(nbytes + sizeof(u64));
+  *x = nbytes + sizeof(u64);
+  return x+1;
 }

 void
 operator delete[](void *p)
 {
-  kmfree(p);
+  u64 *x = (u64*) p;
+  x--;
+  kmfree(x, *x);
 }
void
......
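Restated, since the hunk above is dense: with kmfree now requiring a size, scalar new/delete get it from NEW_DELETE_OPS, but operator new[] cannot know its total size at delete[] time, so it stashes the size in a u64 header just below the returned pointer and delete[] reads it back. A user-space sketch of the trick (sized_free is a hypothetical stand-in for kmfree):

  #include <cstdint>
  #include <cstdlib>

  static void sized_free(void *p, uint64_t n) { (void) n; std::free(p); }

  void *new_array(uint64_t nbytes) {
    uint64_t *x = (uint64_t *) std::malloc(nbytes + sizeof(uint64_t));
    *x = nbytes + sizeof(uint64_t);  // record total size ahead of the payload
    return x + 1;                    // caller sees only the payload
  }

  void delete_array(void *p) {
    uint64_t *x = (uint64_t *) p - 1;  // step back to the hidden header
    sized_free(x, *x);                 // recover the size the caller never knew
  }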
......@@ -91,7 +91,8 @@ range::~range()
for (int l = 0; l < nlevel; l++) {
next[l] = (struct range *) 0xDEADBEEF;
}
kmalignfree(lock);
destroylock(lock);
kmalignfree(lock, CACHELINE, sizeof(struct spinlock));
delete[] next;
}
......@@ -176,7 +177,7 @@ crange::print(int full)
}
crange::crange(int nl)
: nlevel(nl), crange_head(new range(this, 0, 0, nlevel))
: nlevel(nl), crange_head(new range_head(this, 0, 0, nlevel))
{
assert(nl > 0);
dprintf("crange::crange return 0x%lx\n", (u64) this);
......
......@@ -42,7 +42,7 @@ fileclose(struct file *f)
netclose(f->socket);
else
panic("fileclose bad type");
kmfree(f);
kmfree(f, sizeof(struct file));
}
// Get metadata about file f.
......
......@@ -231,6 +231,7 @@ inode::~inode()
}
destroylock(&lock);
destroycondvar(&cv);
}
struct inode*
......@@ -484,6 +485,8 @@ class diskblock : public rcu_freed {
bfree(_dev, _block);
delete this;
}
NEW_DELETE_OPS(diskblock)
};
static void
......
......@@ -5,7 +5,7 @@
#include "kernel.hh"
#include "bits.hh"
#include "spinlock.h"
#include "kalloc.h"
#include "kalloc.hh"
#include "queue.h"
#include "condvar.h"
#include "proc.hh"
......
......@@ -7,7 +7,7 @@
#include "mmu.h"
#include "kernel.hh"
#include "spinlock.h"
#include "kalloc.h"
#include "kalloc.hh"
#include "mtrace.h"
#include "cpu.hh"
#include "multiboot.hh"
......@@ -16,6 +16,7 @@ static struct Mbmem mem[128];
static u64 nmem;
static u64 membytes;
struct kmem kmems[NCPU];
struct kmem slabmem[slab_type_max][NCPU];
extern char end[]; // first address after kernel loaded from ELF file
char *newend;
......@@ -126,49 +127,60 @@ kfree_pool(struct kmem *m, char *v)
if (ALLOC_MEMSET && kinited && m->size <= 16384)
memset(v, 1, m->size);
acquire(&m->lock);
r = (struct run*)v;
r->next = m->freelist;
m->freelist = r;
while (!cmpxch_update(&m->freelist, &r->next, r))
; /* spin */
m->nfree++;
if (kinited)
mtunlabel(mtrace_label_block, r);
release(&m->lock);
}
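The loop above makes kfree_pool's freelist push lock-free. cmpxch_update is not shown in this commit, but from its uses it evidently stores the observed head through its second argument and retries until a compare-and-swap installs r as the new head. The same shape in portable C++, where compare_exchange_weak reloads the expected value on failure:

  #include <atomic>

  struct run { run *next; };

  // Push r onto a LIFO freelist without a lock. On CAS failure,
  // compare_exchange_weak rewrites r->next with the head it saw,
  // so the empty loop body mirrors the kernel's "spin".
  void push(std::atomic<run*> &freelist, run *r) {
    r->next = freelist.load();
    while (!freelist.compare_exchange_weak(r->next, r))
      ; /* spin */
  }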
static void __attribute__((unused))
kmemprint(void)
static void
kmemprint_pool(struct kmem *km)
{
cprintf("free pages: [ ");
cprintf("pool %s: [ ", &km[0].name[1]);
for (u32 i = 0; i < NCPU; i++)
if (i == mycpu()->id)
cprintf("<%lu> ", kmems[i].nfree);
cprintf("<%lu> ", km[i].nfree.load());
else
cprintf("%lu ", kmems[i].nfree);
cprintf("%lu ", km[i].nfree.load());
cprintf("]\n");
}
void
kmemprint()
{
kmemprint_pool(kmems);
for (int i = 0; i < slab_type_max; i++)
kmemprint_pool(slabmem[i]);
}
static char*
kalloc_pool(struct kmem *km)
{
struct run *r = 0;
struct kmem *m;
scoped_gc_epoch gc;
u32 startcpu = mycpu()->id;
for (u32 i = 0; r == 0 && i < NCPU; i++) {
int cn = (i + startcpu) % NCPU;
m = &km[cn];
acquire(&m->lock);
r = m->freelist;
while (r && !cmpxch_update(&m->freelist, &r, r->next))
; /* spin */
if (r) {
m->freelist = r->next;
m->nfree--;
break;
}
release(&m->lock);
}
if (r == 0) {
cprintf("kalloc: out of memory\n");
cprintf("kalloc: out of memory in pool %s\n", km->name);
kmemprint();
return 0;
}
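The matching pop in kalloc_pool above is also lock-free, and the new scoped_gc_epoch is what makes it safe: popping head->next is exposed to the classic ABA problem if a page can be freed and reused while another CPU still holds the old head pointer, so the epoch scheme (as I read it) defers reuse until concurrent readers drain. A sketch under that assumption:

  #include <atomic>

  struct run { run *next; };

  // Pop the freelist head. Only safe if something external (here,
  // xv6-scale's scoped_gc_epoch) keeps r mapped and unrecycled while
  // we read r->next; otherwise this is a textbook ABA bug.
  run *pop(std::atomic<run*> &freelist) {
    run *r = freelist.load();
    while (r && !freelist.compare_exchange_weak(r, r->next))
      ;  // on failure, r is reloaded with the current head
    return r;
  }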
......@@ -225,18 +237,9 @@ initkalloc(u64 mbaddr)
for (int c = 0; c < NCPU; c++) {
kmems[c].name[0] = (char) c + '0';
safestrcpy(kmems[c].name+1, "kmem", MAXNAME-1);
initlock(&kmems[c].lock, kmems[c].name, LOCKSTAT_KALLOC);
kmems[c].size = PGSIZE;
}
for (int i = 0; i < slab_type_max; i++) {
for (int c = 0; c < NCPU; c++) {
slabmem[i][c].name[0] = (char) c + '0';
initlock(&slabmem[i][c].lock,
slabmem[i][c].name, LOCKSTAT_KALLOC);
}
}
if (VERBOSE)
cprintf("%lu mbytes\n", membytes / (1<<20));
n = membytes / NCPU;
......@@ -247,8 +250,22 @@ initkalloc(u64 mbaddr)
k = (((uptr)p) - KBASE);
for (int c = 0; c < NCPU; c++) {
// Fill slab allocators
for (int i = 0; i < NELEM(slabmem); i++)
strncpy(slabmem[slab_stack][c].name, " kstack", MAXNAME);
slabmem[slab_stack][c].size = KSTACKSIZE;
slabmem[slab_stack][c].ninit = CPUKSTACKS;
strncpy(slabmem[slab_perf][c].name, " kperf", MAXNAME);
slabmem[slab_perf][c].size = PERFSIZE;
slabmem[slab_perf][c].ninit = 1;
strncpy(slabmem[slab_kshared][c].name, " kshared", MAXNAME);
slabmem[slab_kshared][c].size = KSHAREDSIZE;
slabmem[slab_kshared][c].ninit = CPUKSTACKS;
for (int i = 0; i < slab_type_max; i++) {
slabmem[i][c].name[0] = (char) c + '0';
slabinit(&slabmem[i][c], &p, &k);
}
// The rest goes to the page allocator
for (; k != n; k += PGSIZE, p = (char*) memnext(p, PGSIZE)) {
......@@ -287,12 +304,10 @@ verifyfree(char *ptr, u64 nbytes)
// Search for pointers in the ptr region
u64 x = *(uptr *)p;
if (KBASE < x && x < KBASE+(128ull<<30)) {
#if 0 /* maybe once this code is C++ */
struct klockstat *kls = (struct klockstat *) x;
if (kls->magic == LOCKSTAT_MAGIC)
panic("LOCKSTAT_MAGIC %p(%lu):%p->%p",
ptr, nbytes, p, kls);
#endif
}
}
#endif
......
......@@ -6,7 +6,7 @@
#include "mmu.h"
#include "kernel.hh"
#include "spinlock.h"
#include "kalloc.h"
#include "kalloc.hh"
#include "mtrace.h"
#include "cpu.hh"
......@@ -19,9 +19,8 @@ struct header {
};
struct freelist {
struct header *buckets[KMMAX+1];
std::atomic<header*> buckets[KMMAX+1];
char name[MAXNAME];
struct spinlock lock;
};
struct freelist freelists[NCPU];
......@@ -32,37 +31,35 @@ kminit(void)
for (int c = 0; c < NCPU; c++) {
freelists[c].name[0] = (char) c + '0';
safestrcpy(freelists[c].name+1, "freelist", MAXNAME-1);
initlock(&freelists[c].lock, freelists[c].name, LOCKSTAT_KMALLOC);
}
}
// get more space for freelists[c].buckets[b]
void
int
morecore(int c, int b)
{
char *p = kalloc();
if(p == 0)
return;
return -1;
int sz = 1 << b;
for(char *q = p;
q + sz + sizeof(struct header) <= p + PGSIZE;
q += sz + sizeof(struct header)){
assert(sz >= sizeof(header));
for(char *q = p; q + sz <= p + PGSIZE; q += sz){
struct header *h = (struct header *) q;
h->next = freelists[c].buckets[b];
freelists[c].buckets[b] = h;
while (!cmpxch_update(&freelists[c].buckets[b], &h->next, h))
; /* spin */
}
return 0;
}
-void *
-kmalloc(u64 nbytes)
+static int
+bucket(u64 nbytes)
 {
-  int nn = 1, b = 0;
-  void *r = 0;
-  struct header *h;
-  int c = mycpu()->id;
+  u64 nn = 8, b = 3;

-  while(nn < nbytes && b <= KMMAX){
+  while(nn < nbytes) {
     nn *= 2;
     b++;
   }
......@@ -71,46 +68,53 @@ kmalloc(u64 nbytes)
   if(b > KMMAX)
     panic("kmalloc too big");

-  acquire(&freelists[c].lock);
-  if(freelists[c].buckets[b] == 0)
-    morecore(c, b);
-  h = freelists[c].buckets[b];
-  if(h){
-    freelists[c].buckets[b] = h->next;
-    r = h + 1;
-    h->next = (header*) (long) b;
-  }
-  release(&freelists[c].lock);
-
-  if (r)
-    mtlabel(mtrace_label_heap, r, nbytes, "kmalloc'ed", sizeof("kmalloc'ed"));
-  if(r == 0)
-    cprintf("kmalloc(%d) failed\n", (int) nbytes);
-  return r;
+  return b;
 }

-void
-kmfree(void *ap)
+void *
+kmalloc(u64 nbytes)
 {
+  int c = mycpu()->id;
+  int b = bucket(nbytes);
+
+  scoped_gc_epoch gc;
   struct header *h;
-  int b;
-  int c = mycpu()->id;
-
-  acquire(&freelists[c].lock);
+  for (;;) {
+    h = freelists[c].buckets[b];
+    if (!h) {
+      if (morecore(c, b) < 0) {
+        cprintf("kmalloc(%d) failed\n", (int) nbytes);
+        return 0;
+      }
+    } else {
+      if (cmpxch(&freelists[c].buckets[b], h, h->next))
+        break;
+    }
+  }

-  h = (struct header *) ((char *)ap - sizeof(struct header));
-  b = (long) h->next;
+  mtlabel(mtrace_label_heap, (void*) h, nbytes, "kmalloc'ed", sizeof("kmalloc'ed"));
+  return h;
+}
+
+void
+kmfree(void *ap, u64 nbytes)
+{
+  int b = bucket(nbytes);
   if(b < 0 || b > KMMAX)
     panic("kmfree bad bucket");

-  verifyfree((char*) ap, (1<<b) - sizeof(struct header));
+  struct header *h = (struct header *) ap;
+  verifyfree((char *) ap, (1<<b));
   if (ALLOC_MEMSET)
-    memset(ap, 3, (1<<b) - sizeof(struct header));
+    memset(ap, 3, (1<<b));

-  h->next = freelists[c].buckets[b];
-  freelists[c].buckets[b] = h;
+  int c = mycpu()->id;
+  while (!cmpxch_update(&freelists[c].buckets[b], &h->next, h))
+    ; /* spin */
   mtunlabel(mtrace_label_heap, ap);
-  release(&freelists[c].lock);
 }
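Two things fall out of the sized-kmfree design visible above. First, the per-chunk struct header no longer has to survive allocation: its only job was to remember the bucket index, which kmalloc and kmfree now recompute from nbytes, so morecore stopped reserving sizeof(struct header) per chunk. Second, bucket() rounds up to a power of two with an 8-byte floor and returns its log2; worked through:

  #include <cstdio>

  // Same rounding as the kernel's bucket(): smallest power of two
  // >= nbytes, floored at 8 bytes; returns log2 of that size.
  static int bucket(unsigned long nbytes) {
    unsigned long nn = 8;
    int b = 3;
    while (nn < nbytes) { nn *= 2; b++; }
    return b;
  }

  int main() {
    std::printf("%d\n", bucket(1));     // 3  -> 8-byte chunk
    std::printf("%d\n", bucket(100));   // 7  -> 128-byte chunk
    std::printf("%d\n", bucket(4096));  // 12 -> 4096-byte chunk
    return 0;
  }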
int
......@@ -124,7 +128,8 @@ kmalign(void **p, int align, u64 size)
return 0;
}
void kmalignfree(void *mem)
void kmalignfree(void *mem, int align, u64 size)
{
kmfree(((void**)mem)[-1]);
u64 msz = size + (align-1) + sizeof(void*);
kmfree(((void**)mem)[-1], msz);
}
......@@ -2,7 +2,7 @@
#include "multiboot.hh"
#include "kernel.hh"
#include "spinlock.h"
#include "kalloc.h"
#include "kalloc.hh"
#include "cpu.hh"
#include "amd64.h"
#include "hwvm.hh"
......
......@@ -288,7 +288,7 @@ netbind(int sock, void *xaddr, int xaddrlen)
lwip_core_lock();
r = lwip_bind(sock, (const sockaddr*) addr, xaddrlen);
lwip_core_unlock();
kmfree(addr);
kmfree(addr, xaddrlen);
return r;
}
......@@ -322,7 +322,7 @@ netaccept(int sock, void *xaddr, void *xaddrlen)
ss = lwip_accept(sock, (sockaddr*) addr, &len);
lwip_core_unlock();
if (ss < 0) {
kmfree(addr);
kmfree(addr, len);
return ss;
}
......@@ -330,7 +330,7 @@ netaccept(int sock, void *xaddr, void *xaddrlen)
lwip_core_lock();
lwip_close(ss);
lwip_core_unlock();
kmfree(addr);
kmfree(addr, len);
return -1;
}
......
......@@ -30,7 +30,7 @@ pipealloc(struct file **f0, struct file **f1)
*f0 = *f1 = 0;
if((*f0 = filealloc()) == 0 || (*f1 = filealloc()) == 0)
goto bad;
if((p = (struct pipe*)kalloc()) == 0)
if((p = (pipe*)kmalloc(sizeof(*p))) == 0)
goto bad;
p->readopen = 1;
p->writeopen = 1;
......@@ -52,7 +52,7 @@ pipealloc(struct file **f0, struct file **f1)
bad:
if(p) {
destroylock(&p->lock);
kfree((char*)p);
kmfree((char*)p, sizeof(*p));
}
if(*f0)
fileclose(*f0);
......@@ -74,7 +74,7 @@ pipeclose(struct pipe *p, int writable)
if(p->readopen == 0 && p->writeopen == 0){
release(&p->lock);
destroylock(&p->lock);
kfree((char*)p);
kmfree((char*)p, sizeof(*p));
} else
release(&p->lock);
}
......
......@@ -10,7 +10,7 @@
#include "bits.hh"
#include "kmtrace.hh"
#include "sched.hh"
#include "kalloc.h"
#include "kalloc.hh"
#include "vm.hh"
#include "ns.hh"
......@@ -140,25 +140,10 @@ exit(void)
panic("zombie exit");
}
class delayedfree : public rcu_freed {
private:
proc *_p;
public:
delayedfree(proc *p) : rcu_freed("delayed proc free"), _p(p) {}
virtual void do_gc() {
kmfree(_p);
delete this;
}
};
static void
freeproc(struct proc *p)
{
destroylock(&p->lock);
delayedfree *df = new delayedfree(p);
gc_delayed(df);
gc_delayed(p);
}
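The delayedfree shim above becomes unnecessary because proc itself now inherits rcu_freed (see the proc.hh hunk earlier), so freeproc can hand the process straight to gc_delayed, and the virtual do_gc() performs the sized delete. The protocol, sketched from the types visible in this commit rather than from the collector itself:

  // gc_delayed(o) stamps o with the current epoch and queues it; once
  // every CPU has moved past that epoch, the collector invokes
  // o->do_gc(), the first point where freeing is safe.
  struct rcu_freed {
    const char *_rcu_type;   // type tag kept for RCU_TYPE_DEBUG
    unsigned long _rcu_epoch;
    rcu_freed(const char *t) : _rcu_type(t), _rcu_epoch(0) {}
    virtual void do_gc() = 0;
  };

  struct widget : public rcu_freed {
    widget() : rcu_freed("widget") {}
    virtual void do_gc() { delete this; }  // NEW_DELETE_OPS supplies the sized delete
  };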
// Look in the process table for an UNUSED proc.
......@@ -171,25 +156,15 @@ allocproc(void)
struct proc *p;
char *sp;
p = (proc*) kmalloc(sizeof(struct proc));
p = new proc(xnspid->allockey());
if (p == 0) return 0;
memset(p, 0, sizeof(*p));
//set_proc_state(p, EMBRYO);
p->_state = EMBRYO;
p->pid = xnspid->allockey();
p->cpuid = mycpu()->id;
p->on_runq = -1;
p->cpu_pin = 0;
initprocgc(p);
#if MTRACE
p->mtrace_stacks.curr = -1;
#endif
snprintf(p->lockname, sizeof(p->lockname), "cv:proc:%d", p->pid);
initlock(&p->lock, p->lockname+3, LOCKSTAT_PROC);
initcondvar(&p->cv, p->lockname);
initcilkframe(&p->cilkframe);
if (xnspid->insert(p->pid, p) < 0)
......
......@@ -4,7 +4,7 @@
#include "condvar.h"
#include "fs.h"
#include "stat.h"
#include "kalloc.h"
#include "kalloc.hh"
#include "file.hh"
#include "bits.hh"
#include "amd64.h"
......@@ -199,7 +199,7 @@ sampread(struct inode *ip, char *dst, u32 off, u32 n)
cc = MIN(LOGHEADER_SZ-off, n);
memmove(dst, (char*)hdr + off, cc);
kmfree(hdr);
kmfree(hdr, len);
n -= cc;
ret += cc;
......
......@@ -19,6 +19,19 @@ mylockstat(struct spinlock *lk)
{
return &lk->stat->s.cpu[cpunum()];
}
void*
klockstat::operator new(unsigned long nbytes)
{
assert(nbytes == sizeof(klockstat));
return kmalloc(sizeof(klockstat));
}
void
klockstat::operator delete(void *p)
{
return kmfree(p, sizeof(klockstat));
}
#endif
static inline void
......
......@@ -5,7 +5,7 @@
#include "kernel.hh"
#include "bits.hh"
#include "spinlock.h"
#include "kalloc.h"
#include "kalloc.hh"
#include "queue.h"
#include "condvar.h"
#include "proc.hh"
......@@ -300,16 +300,19 @@ vmap::insert(vmnode *n, uptr vma_start, int dotlb)
span.replace(e);
}
updatepages(pml4, e->vma_start, e->vma_end, [](atomic<pme_t> *p) {
bool needtlb = false;
updatepages(pml4, e->vma_start, e->vma_end, [&needtlb](atomic<pme_t> *p) {
for (;;) {
pme_t v = p->load();
if (v & PTE_LOCK)
continue;
if (cmpxch(p, v, (pme_t) 0))
break;
if (v != 0)
needtlb = true;
}
});
if(dotlb)
if (needtlb && dotlb)
tlbflush();
return 0;
}
......@@ -335,16 +338,20 @@ vmap::remove(uptr vma_start, uptr len)
span.replace(0);
}
updatepages(pml4, vma_start, vma_start + len, [](atomic<pme_t> *p) {
bool needtlb = false;
updatepages(pml4, vma_start, vma_start + len, [&needtlb](atomic<pme_t> *p) {
for (;;) {
pme_t v = p->load();
if (v & PTE_LOCK)
continue;
if (cmpxch(p, v, (pme_t) 0))
break;
if (v != 0)
needtlb = true;
}
});
tlbflush();
if (needtlb)
tlbflush();
return 0;
}
......
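Both vm.cc hunks above make the same micro-optimization: the PTE-clearing lambda captures a needtlb flag by reference and sets it only when it zeroes a nonzero entry, so unmapping pages that never faulted in skips the TLB flush altogether. The capture pattern in miniature (the kernel version additionally spins while PTE_LOCK is set, omitted here):

  #include <atomic>
  #include <cstdint>

  typedef uint64_t pme_t;

  void clear_range(std::atomic<pme_t> *first, std::atomic<pme_t> *last,
                   void (*tlbflush)()) {
    bool needtlb = false;
    auto zap = [&needtlb](std::atomic<pme_t> *p) {
      pme_t v = p->exchange(0);
      if (v != 0)
        needtlb = true;   // a live translation may still be cached
    };
    for (std::atomic<pme_t> *p = first; p != last; ++p)
      zap(p);
    if (needtlb)
      tlbflush();         // pay for the flush only when something was mapped
  }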
......@@ -2,3 +2,4 @@ void* memset(void*, int, unsigned int);
void* memcpy(void *dst, const void *src, unsigned int n);
void* memset(void*, int, unsigned int);
unsigned int strlen(const char* str);
int memcmp(const void*, const void*, unsigned int);
......@@ -198,7 +198,7 @@ lwip_thread(void *x)
lwip_core_lock();
lt->thread(lt->arg);
lwip_core_unlock();
kmfree(lt);
kmfree(lt, sizeof(*lt));
}
sys_thread_t
......
......@@ -17,6 +17,7 @@
#define VICTIMAGE 1000000 // cycles a proc executes before an eligible victim
#define VERBOSE 0 // print kernel diagnostics
#define SPINLOCK_DEBUG 1 // Debug spin locks
#define RCU_TYPE_DEBUG 1
#define LOCKSTAT 1
#define VERIFYFREE LOCKSTAT
#define ALLOC_MEMSET 1
......
......@@ -20,6 +20,7 @@ typedef uintptr_t uptr;
#define panic(...) do { printf(__VA_ARGS__); assert(0); } while (0)
#define LOCKSTAT_CRANGE 0
#define LOCKSTAT_GC 0
#define NEW_DELETE_OPS(type)
struct spinlock {
pthread_mutex_t mu;
......@@ -54,6 +55,11 @@ initlock(spinlock *s, const char *m, int lockstat)
}
static inline void
destroylock(spinlock *s)
{
}
static inline void
cv_wakeup(condvar *c)
{
pthread_cond_signal(&c->cv);
......@@ -89,7 +95,7 @@ kmalign(void **ptr, size_t align, size_t size)
}
static inline void
kmalignfree(void *ptr)
kmalignfree(void *ptr, size_t align, size_t size)
{
free(ptr);
}
......
......@@ -80,6 +80,11 @@ threadpin(void (*fn)(void*), void *arg, const char *name, int cpu)
makeproc(p);
}
struct my_range : public range {
my_range(crange *cr, u64 k, u64 sz) : range(cr, k, sz) {}
virtual void do_gc() { delete this; }
};
static pthread_barrier_t worker_b, populate_b;
enum { iter_total = 1000000 };
......@@ -99,7 +104,7 @@ worker(void *arg)
span.replace(0);
} else {
ANON_REGION("worker add", &perfgroup);
span.replace(new range(cr, k, 1));
span.replace(new my_range(cr, k, 1));
}
}
......@@ -111,7 +116,7 @@ populate(void *arg)
{
crange *cr = (crange*) arg;
for (u32 i = 0; i < crange_items; i++)
cr->search_lock(1 + 2*i, 1).replace(new range(cr, 1+2*i, 1));
cr->search_lock(1 + 2*i, 1).replace(new my_range(cr, 1+2*i, 1));
pthread_barrier_wait(&populate_b);
}
......