Commit c90f84ff authored by Silas Boyd-Wickizer

Merge

......@@ -27,10 +27,10 @@ OBJCOPY = $(TOOLPREFIX)objcopy
STRIP = $(TOOLPREFIX)strip
# XXX(sbw)
# -nostdinc -nostdinc++
# -nostdinc -nostdinc++
COMFLAGS = -static -fno-builtin -fno-strict-aliasing -O2 -Wall \
-g -MD -m64 -Werror -fms-extensions -mno-sse \
-g -MD -m64 -Werror -fms-extensions -mno-sse -mcx16 \
-mno-red-zone -Iinclude -I$(QEMUSRC) -fno-omit-frame-pointer \
-DHW_$(HW) -include param.h -include include/compiler.h -DXV6
COMFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
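The new -mcx16 flag lets GCC emit cmpxchg16b, which the 128-bit atomics introduced later in this diff (u128, vptr/versioned, the atomic<u128> specializations) rely on. A minimal stand-alone sketch, not part of the commit, of the builtin that motivates the flag:

// cas128.cc -- build with: g++ -O2 -mcx16 -c cas128.cc
// Without -mcx16, GCC typically cannot inline a 16-byte compare-and-swap;
// the builtin then becomes an undefined __sync_*_16 reference at link time
// (newer toolchains may route it through libatomic instead).
typedef unsigned __int128 u128;

bool cas128(u128 *p, u128 expected, u128 desired)
{
  return __sync_bool_compare_and_swap(p, expected, desired);
}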
......
......@@ -5,8 +5,6 @@
#include "amd64.h"
#include "uspinlock.h"
static struct uspinlock l;
static volatile int tcount;
enum { readaccess = 0 };
enum { verbose = 0 };
enum { npg = 1 };
......@@ -36,11 +34,6 @@ thr(void *arg)
exit();
}
}
acquire(&l);
// fprintf(1, "mapbench[%d]: done\n", getpid());
tcount++;
release(&l);
}
int
......@@ -55,7 +48,6 @@ main(int ac, char **av)
int nthread = atoi(av[1]);
acquire(&l);
// fprintf(1, "mapbench[%d]: start esp %x, nthread=%d\n", getpid(), rrsp(), nthread);
for(int i = 0; i < nthread; i++) {
......@@ -66,22 +58,10 @@ main(int ac, char **av)
if (0) fprintf(1, "mapbench[%d]: child %d\n", getpid(), tid);
}
for(;;){
int lastc = tcount;
// fprintf(1, "mapbench[%d]: tcount=%d\n", getpid(), lastc);
release(&l);
if(lastc==nthread)
break;
while(tcount==lastc)
__asm __volatile("":::"memory");
acquire(&l);
}
release(&l);
// fprintf(1, "mapbench[%d]: done\n", getpid());
for(int i = 0; i < nthread; i++)
wait();
// fprintf(1, "mapbench[%d]: done\n", getpid());
mtdisable("xv6-mapbench");
// halt();
exit();
......
#pragma once
#if 0
#define __sync_synchronize() do { __asm__ __volatile__("" ::: "memory"); } while (0)
#endif
#define _GLIBCXX_VISIBILITY(x)
#define _GLIBCXX_BEGIN_NAMESPACE_VERSION
#define _GLIBCXX_END_NAMESPACE_VERSION
......
#include "atomic.hh"
template<class T>
struct vptr {
u128 _a;
T ptr() const { return (T) (_a & 0xffffffffffffffffULL); }
u64 v() const { return _a >> 64; }
vptr(T p, u64 v) : _a((((u128)v)<<64) | (u64) p) {}
vptr(u128 a) : _a(a) {}
};
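A quick illustration (not from the commit) of the packing above: the pointer occupies the low 64 bits of the u128 and the version tag the high 64 bits, so both can be swapped together with a single 16-byte compare-and-swap.

#include <cassert>
typedef unsigned __int128 u128;
typedef unsigned long u64;

int main()
{
  int x;
  u128 a = (((u128)7) << 64) | (u64)&x;            // version 7, pointer &x
  assert((int*)(u64)(a & 0xffffffffffffffffULL) == &x);
  assert((u64)(a >> 64) == 7);
}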
template<class T>
class versioned {
private:
std::atomic<u128> _a;
public:
vptr<T> load() { return vptr<T>(_a.load()); }
bool compare_exchange(const vptr<T> &expected, T desired) {
vptr<T> n(desired, expected.v()+1); // bump the version so a recycled pointer can't satisfy a stale CAS
return cmpxch(&_a, expected._a, n._a);
}
};
struct run {
struct run *next;
};
......@@ -8,7 +31,7 @@ struct kmem {
char name[MAXNAME];
u64 size;
u64 ninit;
std::atomic<run*> freelist;
versioned<run*> freelist;
std::atomic<u64> nfree;
} __mpalign__;
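Why versioned<run*> rather than atomic<run*>: a plain pointer CAS on a freelist is exposed to ABA, where the head is popped, freed, reallocated, and pushed back between another CPU's load and its CAS, so the CAS succeeds against a stale next pointer. An incrementing version tag makes the recycled head compare unequal. A minimal user-space sketch of the same idea (hypothetical names, not the kernel's code):

// aba.cc -- build: g++ -std=c++11 -O2 -mcx16 aba.cc -latomic
#include <atomic>
#include <cassert>
typedef unsigned __int128 u128;
typedef unsigned long u64;

struct node { node *next; };
static std::atomic<u128> head{0};        // low 64 bits: pointer, high 64: version

static void push(node *n)
{
  u128 old = head.load();
  for (;;) {
    n->next = (node *) (u64) old;
    u128 desired = (((old >> 64) + 1) << 64) | (u64) n;
    if (head.compare_exchange_weak(old, desired))   // reloads old on failure
      break;
  }
}

static node *pop()
{
  u128 old = head.load();
  for (;;) {
    node *n = (node *) (u64) old;
    if (!n)
      return nullptr;
    u128 desired = (((old >> 64) + 1) << 64) | (u64) n->next;
    if (head.compare_exchange_weak(old, desired))
      return n;
  }
}

int main()
{
  node a{}, b{};
  push(&a); push(&b);
  assert(pop() == &b && pop() == &a && pop() == nullptr);
}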
......
......@@ -8,14 +8,28 @@ extern "C" {
#include <stdarg.h>
#define KBASE 0xFFFFFF0000000000ull
#define KCODE 0xFFFFFFFFC0000000ull
#define KSHARED 0xFFFFF00000000000ull
#define USERTOP 0x0000800000000000ull
#define KCSEG (2<<3) /* kernel code segment */
#define KDSEG (3<<3) /* kernel data segment */
static inline uptr v2p(void *a) { return (uptr) a - KBASE; }
static inline void *p2v(uptr a) { return (u8 *) a + KBASE; }
static inline uptr v2p(void *a) {
uptr ua = (uptr) a;
if (ua >= KCODE)
return ua - KCODE;
else
return ua - KBASE;
}
static inline void *p2v(uptr a) {
uptr ac = a + KCODE;
if (ac >= KCODE)
return (void*) ac;
else
return (u8 *) a + KBASE;
}
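The split matters because the kernel image is now linked at KCODE (so -mcmodel=kernel's sign-extended 32-bit addressing works) while all of physical memory stays direct-mapped at KBASE; v2p and p2v pick whichever window a virtual address falls in. A small stand-alone check of the arithmetic, using the constants above (my own test, not kernel code):

#include <cassert>
#include <cstdint>
typedef uintptr_t uptr;

enum : uptr { KBASE = 0xFFFFFF0000000000ull, KCODE = 0xFFFFFFFFC0000000ull };

static uptr v2p(uptr ua) { return ua >= KCODE ? ua - KCODE : ua - KBASE; }

int main()
{
  assert(v2p(KCODE + 0x100000) == 0x100000);   // kernel text: KCODE+0x100000, loaded AT(0x100000)
  assert(v2p(KBASE + 0x200000) == 0x200000);   // direct-map page
}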
struct trapframe;
struct cilkframe;
......
......@@ -57,9 +57,7 @@ struct proc : public rcu_freed {
SLIST_HEAD(childlist, proc) childq;
SLIST_ENTRY(proc) child_next;
struct condvar cv;
u64 epoch;
struct spinlock gc_epoch_lock;
u64 epoch_depth;
std::atomic<u64> epoch; // low 8 bits are depth count
char lockname[16];
int on_runq;
int cpu_pin;
......@@ -77,12 +75,11 @@ struct proc : public rcu_freed {
proc(int npid) : rcu_freed("proc"), vmap(0), brk(0), kstack(0),
_state(EMBRYO), pid(npid), parent(0), tf(0), context(0), killed(0),
cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0), epoch_depth(0),
cwd(0), tsc(0), curcycles(0), cpuid(0), epoch(0),
on_runq(-1), cpu_pin(0), runq(0), oncv(0), cv_wakeup(0)
{
snprintf(lockname, sizeof(lockname), "cv:proc:%d", pid);
initlock(&lock, lockname+3, LOCKSTAT_PROC);
initlock(&gc_epoch_lock, lockname+3, LOCKSTAT_PROC);
initcondvar(&cv, lockname);
memset(&childq, 0, sizeof(childq));
......@@ -95,7 +92,6 @@ struct proc : public rcu_freed {
~proc() {
destroylock(&lock);
destroylock(&gc_epoch_lock);
destroycondvar(&cv);
}
......
......@@ -6,6 +6,10 @@ typedef unsigned int u32;
typedef int s32;
typedef unsigned long u64;
typedef long s64;
#ifdef XV6
typedef unsigned __int128 u128;
typedef __int128 s128;
#endif
typedef u64 uptr;
typedef uptr paddr;
......
......@@ -64,8 +64,8 @@ $(O)/kernel/%.o: lib/%.cc
$(Q)mkdir -p $(@D)
$(Q)$(CXX) $(CXXFLAGS) -c -o $@ $<
$(O)/kernel/%.o: CFLAGS+=-mcmodel=large
$(O)/kernel/%.o: CXXFLAGS+=-mcmodel=large
$(O)/kernel/%.o: CFLAGS+=-mcmodel=kernel
$(O)/kernel/%.o: CXXFLAGS+=-mcmodel=kernel
$(O)/kernel/incbin.o: ASFLAGS+=-DMAKE_OUT=$(O)
$(O)/kernel/incbin.o: $(O)/kernel/initcode $(O)/kernel/bootother $(O)/fs.img
......
......@@ -6,17 +6,17 @@
#include "mmu.h"
# KADDR is the kernel virtual address of the first byte of physical memory.
# The linker loads the executable as if starting at KBASE+2MB, but we
# The linker loads the executable as if starting at KCODE+2MB, but we
# ask the loader to load the kernel at physical 2MB and then set up the
# necessary memory mapping to switch to the higher address.
# The value of KBASE must match the definitions in kernel.h and kernel.ld.
#define KBASE 0xFFFFFF0000000000
# The value of KCODE must match the definitions in kernel.h and kernel.ld.
#define KCODE 0xFFFFFFFFC0000000
# PADDR(x) is the physical memory address corresponding to x.
# Until we set up the memory map, fairly late in this file, we have to
# refer to PADDR(symbol) instead of symbol, so that we use the
# physical address.
#define PADDR(x) ((x) - KBASE)
#define PADDR(x) ((x) - KCODE)
# STACK is the size of the bootstrap stack.
#define STACK 8192
......@@ -126,7 +126,7 @@ tramp64:
# running at PADDR(tramp64), so use an explicit calculation to
# load and jump to the correct address. %rax should hold the
# physical address of the jmp target.
movq $KBASE, %r11
movq $KCODE, %r11
addq %r11, %rax
jmp *%rax
......@@ -134,7 +134,7 @@ tramp64:
.comm stack, STACK
# Page tables. See section 4.5 of 253668.pdf.
# We map the first GB of physical memory at 0 and at KBASE. At boot
# We map the first GB of physical memory at 0 and at KCODE. At boot
# time we are using the mapping at 0 but during ordinary execution we
# use the high mapping.
# The intent is that after bootstrap the kernel can expand this mapping
......@@ -145,10 +145,8 @@ tramp64:
.global kpml4
kpml4:
.quad PADDR(pdpt0) + (1<<0) + (1<<1) // present, read/write
.quad 0
.space 4096 - 2*16
.space 4096 - 2*8
.quad PADDR(pdpt1) + (1<<0) + (1<<1) // present, read/write
.quad 0
.align 4096
pdpt0:
.quad PADDR(pdt) + (1<<0) + (1<<1) // present, read/write
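A quick way to see why kpml4 changed shape (a worked check, not part of the commit): the PML4 slot for a virtual address is bits 47..39, so KBASE lands in slot 510 while KCODE lands in slot 511. With only slot 0 and slot 511 populated, the gap between them is 4096 - 2*8 bytes.

#include <cassert>
#include <cstdint>

static unsigned pml4x(uint64_t va) { return (va >> 39) & 0x1ff; }

int main()
{
  assert(pml4x(0xFFFFFF0000000000ull) == 510);   // KBASE (old high mapping)
  assert(pml4x(0xFFFFFFFFC0000000ull) == 511);   // KCODE (new high mapping)
}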
......@@ -156,8 +154,8 @@ pdpt0:
.align 4096
pdpt1:
.quad PADDR(pdt) + (1<<0) + (1<<1) // present, read/write
.space 4096 - 8
.quad PADDR(pdt) + (1<<0) + (1<<1) // present, read/write
.align 4096
pdt:
......
......@@ -23,3 +23,30 @@ __cxa_pure_virtual(void)
{
panic("__cxa_pure_virtual");
}
namespace std {
template<>
u128
atomic<u128>::load(memory_order __m) const
{
__sync_synchronize();
u128 v = _M_i;
__sync_synchronize();
return v;
}
template<>
bool
atomic<u128>::compare_exchange_weak(u128 &__i1, u128 i2, memory_order __m)
{
// XXX no __sync_val_compare_and_swap for u128
u128 o = __i1;
bool ok = __sync_bool_compare_and_swap(&_M_i, o, i2);
if (!ok)
__i1 = _M_i;
return ok;
}
};
......@@ -84,7 +84,6 @@ static void dostack(uptr a0, u64 a1)
goto bad;
if(args->vmap->insert(vmn, USERTOP-(USTACKPAGES*PGSIZE), 1) < 0)
goto bad;
vmn = 0;
// Push argument strings, prepare rest of stack in ustack.
sp = USERTOP;
......@@ -135,7 +134,6 @@ static void doheap(uptr a0, u64 a1)
goto bad;
if(args->vmap->insert(vmn, BRK, 1) < 0)
goto bad;
vmn = 0;
prof_end(doheap_prof);
return;
......@@ -149,7 +147,6 @@ exec(char *path, char **argv)
{
struct inode *ip = NULL;
struct vmap *vmp = NULL;
struct vmnode *vmn = NULL;
struct elfhdr elf;
struct proghdr ph;
u64 off;
......@@ -227,8 +224,6 @@ exec(char *path, char **argv)
cprintf("exec failed\n");
if(vmp)
vmp->decref();
if(vmn)
delete vmn;
gc_end_epoch();
return 0;
......
......@@ -51,16 +51,22 @@ gc_free_tofreelist(atomic<rcu_freed*> *head, u64 epoch)
int nfree = 0;
rcu_freed *r, *nr;
for (r = *head; r != NULL; r = nr) {
r = *head;
while (!std::atomic_compare_exchange_strong(head, &r, (rcu_freed*) 0))
; /* spin */
for (; r; r = nr) {
if (r->_rcu_epoch > epoch) {
cprintf("gc_free_tofreelist: r->epoch %ld > epoch %ld\n", r->_rcu_epoch, epoch);
#if RCU_TYPE_DEBUG
cprintf("gc_free_tofreelist: name %s\n", r->_rcu_type);
#endif
assert(0);
}
nr = r->_rcu_next;
r->do_gc();
nfree++;
}
*head = r;
return nfree;
}
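The rewritten loop first detaches the entire tofree list by swapping the head to null with a CAS loop, then walks the now-private chain without further synchronization. A plain atomic exchange expresses the same detach step; a minimal sketch (my own illustration, not the kernel's helper):

#include <atomic>

struct node { node *next; };

// Grab every element currently on a lock-free list in one step; the
// caller then owns the returned chain exclusively.
static node *detach_all(std::atomic<node*> *head)
{
  return head->exchange(nullptr);
}

int main()
{
  node a{nullptr};
  std::atomic<node*> head{&a};
  node *chain = detach_all(&head);
  return (chain == &a && head.load() == nullptr) ? 0 : 1;
}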
......@@ -89,6 +95,7 @@ gc_move_to_tofree_cpu(int c, u64 epoch)
// move delayed NEPOCH epochs ahead
gc_state[c].delayed[fe].epoch += NEPOCH;
assert(gc_state[c].delayed[fe].head == 0);
// XXX race with gc_delayed()?
return 0;
}
......@@ -125,20 +132,22 @@ gc_delayfreelist(void)
if (gc_debug) {
cprintf("(%d,%d) (%s): min %lu global %lu\n", myproc()->cpuid, myproc()->pid, myproc()->name, min, global);
}
myproc()->epoch_depth++; // ensure enumerate's call to gc_begin_epoch doesn't have sideeffects
myproc()->epoch++; // ensure enumerate's call to gc_begin_epoch doesn't have sideeffects
xnspid->enumerate([&min](u32, proc *p)->bool{
// Some threads may never call begin/end_epoch(), and never update
// p->epoch, so gc_thread does it for them. XXX get rid off lock?
acquire(&p->gc_epoch_lock);
if (p->epoch_depth == 0)
p->epoch = global_epoch;
release(&p->gc_epoch_lock);
// p->epoch, so gc_thread does it for them.
u64 x = p->epoch.load();
if (!(x & 0xff)) {
cmpxch(&p->epoch, x, global_epoch.load() << 8);
x = p->epoch.load();
}
// cprintf("gc_min %d(%s): %lu %ld\n", p->pid, p->name, p->epoch, p->epoch_depth);
if (min > p->epoch)
min = p->epoch;
if (min > (x>>8))
min = (x>>8);
return false;
});
myproc()->epoch_depth--;
myproc()->epoch--;
if (min >= global) {
gc_move_to_tofree(min);
}
......@@ -148,10 +157,10 @@ gc_delayfreelist(void)
void
gc_delayed(rcu_freed *e)
{
gc_state[mycpu()->id].ndelayed++;
pushcli();
int c = mycpu()->id;
u64 myepoch = myproc()->epoch;
gc_state[c].ndelayed++;
u64 myepoch = (myproc()->epoch >> 8);
u64 minepoch = gc_state[c].delayed[myepoch % NEPOCH].epoch;
if (gc_debug)
cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid,
......@@ -163,30 +172,26 @@ gc_delayed(rcu_freed *e)
e->_rcu_epoch = myepoch;
e->_rcu_next = gc_state[c].delayed[myepoch % NEPOCH].head;
while (!cmpxch_update(&gc_state[c].delayed[myepoch % NEPOCH].head, &e->_rcu_next, e)) {}
popcli();
}
void
gc_begin_epoch(void)
{
if (myproc() == NULL) return;
acquire(&myproc()->gc_epoch_lock);
if (myproc()->epoch_depth++ > 0)
goto done;
myproc()->epoch = global_epoch; // not atomic, but it never goes backwards
u64 v = myproc()->epoch++;
if (v & 0xff)
return;
cmpxch(&myproc()->epoch, v+1, (global_epoch.load()<<8)+1);
// __sync_synchronize();
done:
release(&myproc()->gc_epoch_lock);
}
void
gc_end_epoch(void)
{
if (myproc() == NULL) return;
acquire(&myproc()->gc_epoch_lock);
--myproc()->epoch_depth;
release(&myproc()->gc_epoch_lock);
if (myproc()->epoch_depth == 0 && gc_state[mycpu()->id].ndelayed > NGC)
u64 e = --myproc()->epoch;
if ((e & 0xff) == 0 && gc_state[mycpu()->id].ndelayed > NGC)
cv_wakeup(&gc_state[mycpu()->id].cv);
}
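The per-process epoch now packs both fields into one atomic word: bits 8 and up hold the epoch number, the low 8 bits hold the begin/end nesting depth, so the common path is a single atomic increment or decrement with no per-process lock. A user-space sketch of that packing (assumed helper names, mirroring the code above):

#include <atomic>
#include <cassert>
#include <cstdint>

static std::atomic<uint64_t> epoch_word{0};        // (epoch << 8) | depth
static std::atomic<uint64_t> global_epoch{42};

static uint64_t epoch_of(uint64_t w) { return w >> 8; }
static uint64_t depth_of(uint64_t w) { return w & 0xff; }

static void begin_epoch()
{
  uint64_t v = epoch_word++;                       // bump the depth
  if ((v & 0xff) != 0)
    return;                                        // already inside an epoch
  // outermost entry: publish the current global epoch (best effort)
  uint64_t expect = v + 1;
  epoch_word.compare_exchange_strong(expect, (global_epoch.load() << 8) + 1);
}

static void end_epoch() { --epoch_word; }

int main()
{
  begin_epoch();
  begin_epoch();                                   // nested
  uint64_t w = epoch_word.load();
  assert(epoch_of(w) == 42 && depth_of(w) == 2);
  end_epoch();
  end_epoch();
  assert(depth_of(epoch_word.load()) == 0);
}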
......@@ -214,7 +219,7 @@ gc_worker(void *x)
release(&wl);
gc_state[mycpu()->id].nrun++;
u64 global = global_epoch;
myproc()->epoch = global_epoch; // move the gc thread to next epoch
myproc()->epoch = global_epoch.load() << 8; // move the gc thread to next epoch
for (i = gc_state[mycpu()->id].min_epoch; i < global-2; i++) {
int nfree = gc_free_tofreelist(&gc_state[mycpu()->id].tofree[i%NEPOCH].head, i);
gc_state[mycpu()->id].tofree[i%NEPOCH].epoch += NEPOCH;
......@@ -232,9 +237,7 @@ gc_worker(void *x)
void
initprocgc(struct proc *p)
{
p->epoch = global_epoch;
p->epoch_depth = 0;
initlock(&p->gc_epoch_lock, "per process gc_lock", 0);
p->epoch = global_epoch.load() << 8;
}
void
......
......@@ -63,7 +63,6 @@ walkpgdir(pgmap *pml4, u64 va, int create)
void
initpg(void)
{
extern char end[];
u64 va = KBASE;
paddr pa = 0;
......@@ -71,12 +70,9 @@ initpg(void)
auto pdp = descend(&kpml4, va, 0, 1, 3);
auto pd = descend(pdp, va, 0, 1, 2);
atomic<pme_t> *sp = &pd->e[PX(1,va)];
u64 flags = PTE_W | PTE_P | PTE_PS;
// Set NX for non-code pages
if (va >= (u64) end)
flags |= PTE_NX;
u64 flags = PTE_W | PTE_P | PTE_PS | PTE_NX;
*sp = pa | flags;
va = va + PGSIZE*512;
va += PGSIZE*512;
pa += PGSIZE*512;
}
}
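With the kernel image now reached only through KCODE, initpg can mark every 2MB page of the KBASE direct map no-execute. A small check of the flag layout (bit positions are assumptions matching the standard x86-64 encoding, not copied from mmu.h):

#include <cassert>
#include <cstdint>

enum : uint64_t {
  PTE_P  = 1ull << 0,    // present
  PTE_W  = 1ull << 1,    // writable
  PTE_PS = 1ull << 7,    // 2MB page
  PTE_NX = 1ull << 63,   // no-execute
};

int main()
{
  uint64_t pa  = 0x200000;                          // a 2MB-aligned frame
  uint64_t pte = pa | PTE_W | PTE_P | PTE_PS | PTE_NX;
  assert(pte >> 63);                                // NX set
  assert((pte & ~PTE_NX) == (pa | 0x83));           // P|W|PS = 0x83
}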
......@@ -196,22 +192,15 @@ tlbflush()
{
u64 myreq = tlbflush_req++;
pushcli();
// the caller may not hold any spinlock, because other CPUs might
// be spinning waiting for that spinlock, with interrupts disabled,
// so we will deadlock waiting for their TLB flush..
assert(mycpu()->ncli == 1);
int myid = mycpu()->id;
lcr3(rcr3());
popcli();
assert(mycpu()->ncli == 0);
for (int i = 0; i < ncpu; i++)
if (i != myid)
lapic_tlbflush(i);
lapic_tlbflush(i);
for (int i = 0; i < ncpu; i++)
if (i != myid)
while (cpus[i].tlbflush_done < myreq)
/* spin */ ;
while (cpus[i].tlbflush_done < myreq)
/* spin */ ;
}
......@@ -118,8 +118,6 @@ kfree_pool(struct kmem *m, char *v)
if ((uptr)v % PGSIZE)
panic("kfree_pool: misaligned %p", v);
if (v < end)
panic("kfree_pool: less than end %p", v);
if (memsize(v) == -1ull)
panic("kfree_pool: unknown region %p", v);
......@@ -128,9 +126,13 @@ kfree_pool(struct kmem *m, char *v)
memset(v, 1, m->size);
r = (struct run*)v;
r->next = m->freelist;
while (!cmpxch_update(&m->freelist, &r->next, r))
; /* spin */
for (;;) {
auto headval = m->freelist.load();
r->next = headval.ptr();
if (m->freelist.compare_exchange(headval, r))
break;
}
m->nfree++;
if (kinited)
mtunlabel(mtrace_label_block, r);
......@@ -169,9 +171,19 @@ kalloc_pool(struct kmem *km)
int cn = (i + startcpu) % NCPU;
m = &km[cn];
r = m->freelist;
while (r && !cmpxch_update(&m->freelist, &r, r->next))
; /* spin */
for (;;) {
auto headval = m->freelist.load();
r = headval.ptr();
if (!r)
break;
run *nxt = r->next;
if (m->freelist.compare_exchange(headval, nxt)) {
if (r->next != nxt)
panic("kalloc_pool: aba race %p %p %p\n", r, r->next, nxt);
break;
}
}
if (r) {
m->nfree--;
......@@ -247,7 +259,8 @@ initkalloc(u64 mbaddr)
n = PGROUNDDOWN(n);
p = (char*)PGROUNDUP((uptr)newend);
k = (((uptr)p) - KBASE);
k = (((uptr)p) - KCODE);
p = (char*) KBASE + k;
for (int c = 0; c < NCPU; c++) {
// Fill slab allocators
strncpy(slabmem[slab_stack][c].name, " kstack", MAXNAME);
......@@ -303,7 +316,7 @@ verifyfree(char *ptr, u64 nbytes)
for (; p < e; p++) {
// Search for pointers in the ptr region
u64 x = *(uptr *)p;
if (KBASE < x && x < KBASE+(128ull<<30)) {
if ((KBASE < x && x < KBASE+(128ull<<30)) || (KCODE < x)) {
struct klockstat *kls = (struct klockstat *) x;
if (kls->magic == LOCKSTAT_MAGIC)
panic("LOCKSTAT_MAGIC %p(%lu):%p->%p",
......
......@@ -4,7 +4,7 @@ ENTRY(xxx)
SECTIONS
{
. = 0xFFFFFF0000100000;
. = 0xFFFFFFFFC0100000;
PROVIDE(text = .);
.text : AT(0x100000) {
*(.text .stub .text.* .gnu.linkonce.t.*)
......
......@@ -19,7 +19,7 @@ struct header {
};
struct freelist {
std::atomic<header*> buckets[KMMAX+1];
versioned<header*> buckets[KMMAX+1];
char name[MAXNAME];
};
......@@ -46,9 +46,12 @@ morecore(int c, int b)
assert(sz >= sizeof(header));
for(char *q = p; q + sz <= p + PGSIZE; q += sz){
struct header *h = (struct header *) q;
h->next = freelists[c].buckets[b];
while (!cmpxch_update(&freelists[c].buckets[b], &h->next, h))
; /* spin */
for (;;) {
auto headptr = freelists[c].buckets[b].load();
h->next = headptr.ptr();
if (freelists[c].buckets[b].compare_exchange(headptr, h))
break;
}
}
return 0;
......@@ -66,7 +69,7 @@ bucket(u64 nbytes)
if(nn != (1 << b))
panic("kmalloc oops");
if(b > KMMAX)
panic("kmalloc too big");
panic("kmalloc too big %ld", nbytes);
return b;
}
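bucket() maps a request size to the index of the smallest power-of-two chunk that fits, so for example kmalloc(100) draws from the 128-byte bucket. A stand-alone sketch of that rounding (my own reconstruction, not the exact kernel function):

#include <cassert>
#include <cstdint>

static int bucket(uint64_t nbytes)
{
  int b = 0;
  uint64_t nn = 1;
  while (nn < nbytes) {   // round up to the next power of two
    nn <<= 1;
    b++;
  }
  return b;
}

int main()
{
  assert(bucket(100)  == 7);    // 128-byte chunks
  assert(bucket(128)  == 7);
  assert(bucket(4096) == 12);
}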
......@@ -81,18 +84,26 @@ kmalloc(u64 nbytes)
int c = mycpu()->id;
for (;;) {
h = freelists[c].buckets[b];
auto headptr = freelists[c].buckets[b].load();
h = headptr.ptr();
if (!h) {
if (morecore(c, b) < 0) {
cprintf("kmalloc(%d) failed\n", (int) nbytes);
return 0;
}
} else {
if (cmpxch(&freelists[c].buckets[b], h, h->next))
header *nxt = h->next;
if (freelists[c].buckets[b].compare_exchange(headptr, nxt)) {
if (h->next != nxt)
panic("kmalloc: aba race");
break;
}
}
}
if (ALLOC_MEMSET)
memset(h, 4, (1<<b));
mtlabel(mtrace_label_heap, (void*) h, nbytes, "kmalloc'ed", sizeof("kmalloc'ed"));
return h;
}
......@@ -110,9 +121,12 @@ kmfree(void *ap, u64 nbytes)
memset(ap, 3, (1<<b));
int c = mycpu()->id;
h->next = freelists[c].buckets[b];
while (!cmpxch_update(&freelists[c].buckets[b], &h->next, h))
; /* spin */
for (;;) {
auto headptr = freelists[c].buckets[b].load();
h->next = headptr.ptr();
if (freelists[c].buckets[b].compare_exchange(headptr, h))
break;
}
mtunlabel(mtrace_label_heap, ap);
}
......
......@@ -83,7 +83,7 @@ sampstart(void)
{
pushcli();
for(struct cpu *c = cpus; c < cpus+ncpu; c++) {
if(c == cpus+cpunum())
if(c == cpus+mycpu()->id)
continue;
lapic_sampconf(c->id);
}
......@@ -95,7 +95,7 @@ static int
samplog(struct trapframe *tf)
{
struct pmulog *l;
l = &pmulog[cpunum()];
l = &pmulog[mycpu()->id];
if (l->count == l->capacity)
return 0;
......@@ -199,7 +199,7 @@ sampread(struct inode *ip, char *dst, u32 off, u32 n)
cc = MIN(LOGHEADER_SZ-off, n);
memmove(dst, (char*)hdr + off, cc);
kmfree(hdr, len);
kmfree(hdr, LOGHEADER_SZ);
n -= cc;
ret += cc;
......
......@@ -60,7 +60,7 @@ steal(void)
pushcli();
for (i = 1; i < ncpu; i++) {
struct runq *q = &runq[(i+cpunum()) % ncpu];
struct runq *q = &runq[(i+mycpu()->id) % ncpu];
struct proc *p;
// XXX(sbw) Look for a process to steal. Acquiring q->lock
......@@ -112,7 +112,7 @@ schednext(void)
struct proc *p = NULL;
pushcli();
q = &runq[cpunum()];
q = &runq[mycpu()->id];
acquire(&q->lock);
p = STAILQ_LAST(&q->q, proc, runqlink);
if (p)
......
......@@ -17,7 +17,7 @@ static int lockstat_enable;
static inline struct cpulockstat *
mylockstat(struct spinlock *lk)
{
return &lk->stat->s.cpu[cpunum()];
return &lk->stat->s.cpu[mycpu()->id];
}
void*
......
......@@ -25,22 +25,18 @@ vmnode::vmnode(u64 npg, vmntype ntype, inode *i, u64 off, u64 s)
{
if (npg > NELEM(page))
panic("vmnode too big\n");
memset(page, 0, sizeof(page));
if (type == EAGER) {
memset(page, 0, npg * sizeof(page[0]));
if (type == EAGER && ip) {
assert(allocpg() == 0);
if (ip)
assert(demand_load() == 0);
assert(demand_load() == 0);
}
}
vmnode::~vmnode()
{
for(u64 i = 0; i < npages; i++) {
if (page[i]) {
for(u64 i = 0; i < npages; i++)
if (page[i])
kfree(page[i]);
page[i] = 0;
}
}
if (ip)
iput(ip);
}
......@@ -419,12 +415,13 @@ vmap::pagefault(uptr va, u32 err)
cprintf("pagefault: err 0x%x va 0x%lx type %d ref %lu pid %d\n",
err, va, m->va_type, m->n->ref.load(), myproc()->pid);
if (m->n && m->n->type == ONDEMAND && m->n->page[npg] == 0) {
if (m->n && !m->n->page[npg])
if (m->n->allocpg() < 0)
panic("pagefault: couldn't allocate pages");
if (m->n && m->n->type == ONDEMAND)
if (m->n->demand_load() < 0)
panic("pagefault: couldn't load");
}
if (m->va_type == COW && (err & FEC_WR)) {
if (pagefault_wcow(m) < 0)
......@@ -471,6 +468,7 @@ vmap::copyout(uptr va, void *p, u64 len)
if(vma == 0)
return -1;
vma->n->allocpg();
uptr pn = (va0 - vma->vma_start) / PGSIZE;
char *p0 = vma->n->page[pn];
if(p0 == 0)
......
......@@ -31,7 +31,7 @@ static inline struct wqueue *
getwq(void)
{
pushcli();
return &queue[cpunum()];
return &queue[mycpu()->id];
}
static inline void
......@@ -43,7 +43,7 @@ putwq(struct wqueue *wq)
static inline struct wqstat *
wq_stat(void)
{
return &stat[cpunum()];
return &stat[mycpu()->id];
}
struct work *
......
......@@ -19,7 +19,7 @@
#define SPINLOCK_DEBUG 1 // Debug spin locks
#define RCU_TYPE_DEBUG 1
#define LOCKSTAT 1
#define VERIFYFREE LOCKSTAT
#define VERIFYFREE 0 // Unreliable, e.g. vma's vmnode pointer gets reused
#define ALLOC_MEMSET 1
#define KSHAREDSIZE (32 << 10)
#define WQSHIFT 4
......
......@@ -101,9 +101,7 @@ kmalignfree(void *ptr, size_t align, size_t size)
}
struct proc {
spinlock gc_epoch_lock;
u64 epoch;
u64 epoch_depth;
std::atomic<u64> epoch;
u32 cpuid;
u32 pid;
char name[32];
......