Start of kernel support for user-level wq

Parent b8573e56
......@@ -27,9 +27,8 @@ UPROGS= \
 	cp \
 	perf \
 	xls \
-	xdu
-#	pdu
-#	pls
+	xdu \
+	wqtest
 ifeq ($(HAVE_LWIP),y)
 UPROGS += \
......
#include "types.h"
#include "user.h"
#include "lib.h"
#include "amd64.h"
#include "wq.hh"

struct testwork : public work {
  testwork(forframe *b) : barrier_(b) {}

  virtual void run() {
    barrier_->dec();
    delete this;
  }

  static void* operator new(unsigned long nbytes) {
    assert(nbytes == sizeof(testwork));
    return xmalloc(sizeof(testwork));
  }

  static void operator delete(void *p) {
    xfree(p, sizeof(testwork));
  }

  struct forframe *barrier_;
};

static void
test(void)
{
  enum { pushes = 100 };
  struct forframe wqbarrier(pushes);

  for (int i = 0; i < pushes; i++) {
    testwork *w = new testwork(&wqbarrier);
    wq_push(w);
  }

  while (!wqbarrier.zero())
    nop_pause();
}

int
main(int ac, char **av)
{
  initwq();
  sleep(5);
  test();
  for (;;);   // spin forever: exitwq() and the printf below are currently unreachable
  exitwq();
  printf("all done!\n");
  return 0;
}
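The test drives the queue end to end: main() initializes the user-level work queue, test() pushes 100 heap-allocated testwork items, each item decrements a shared countdown barrier as a worker runs it, and the pushing thread spins on nop_pause() until the count reaches zero. forframe serves purely as that countdown barrier; its real definition is in wq.hh, but a minimal sketch of the two operations the test relies on, assuming an atomic counter, would be:

// Sketch of the countdown barrier wqtest depends on (hypothetical;
// the actual forframe is defined in wq.hh and used via dec()/zero()).
#include <atomic>

struct forframe {
  explicit forframe(int n) : count_(n) {}
  void dec()  { count_.fetch_sub(1, std::memory_order_release); }
  bool zero() { return count_.load(std::memory_order_acquire) == 0; }
private:
  std::atomic<int> count_;
};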
......@@ -10,7 +10,7 @@ extern pgmap kpml4;
 void freevm(pgmap *pml4);
 pgmap* setupkvm(void);
-int setupkshared(pgmap *pml4, char *kshared);
+int setupuvm(pgmap *pml4, char *kshared, char *uwq);
 std::atomic<pme_t>* walkpgdir(pgmap *pml4, u64, int);
 void tlbflush(void);
......
......@@ -63,6 +63,7 @@ enum {
   slab_perf,
   slab_kshared,
   slab_wq,
+  slab_userwq,
   slab_type_max
 };
......
......@@ -5,13 +5,9 @@ extern "C" {
 }
 #include "atomic.hh"
+#include "memlayout.h"
 #include <stdarg.h>
-#define KBASE   0xFFFFFF0000000000ull
-#define KCODE   0xFFFFFFFFC0000000ull
-#define KSHARED 0xFFFFF00000000000ull
-#define USERTOP 0x0000800000000000ull
 #define KCSEG   (2<<3)  /* kernel code segment */
 #define KDSEG   (3<<3)  /* kernel data segment */
......
#define KBASE   0xFFFFFF0000000000ull
#define KCODE   0xFFFFFFFFC0000000ull
#define KSHARED 0xFFFFF00000000000ull
#define USERWQ  0xFFFFF00100000000ull
#define USERTOP 0x0000800000000000ull
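USERWQ sits 4 GiB above KSHARED (0xFFFFF00100000000 - 0xFFFFF00000000000 = 0x100000000), so the fixed work-queue mapping cannot collide with the kshared region as long as KSHAREDSIZE stays under 4 GiB. A compile-time check along these lines would make that assumption explicit (not part of the commit, and it assumes KSHAREDSIZE is visible at this point):

// Hypothetical sanity check for the layout chosen above.
static_assert(USERWQ >= KSHARED + KSHAREDSIZE,
              "USERWQ must start above the end of the kshared mapping");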
......@@ -9,6 +9,8 @@
 #define VM_CRANGE 1
 #define VM_RADIX 0
 
+class wq;
+
 using std::atomic;
// A memory object (physical pages or inode).
......@@ -94,5 +96,7 @@ struct vmap {
 private:
   int pagefault_wcow(vma *m);
+
+  wq *const uwq_;
   struct spinlock brklock_;
 };
......@@ -4,6 +4,7 @@
#include "user.h"
#include "wq.hh"
#include "pthread.h"
#include "memlayout.h"
typedef struct uspinlock wqlock_t;
......@@ -19,7 +20,11 @@ mycpuid(void)
 static inline void*
 allocwq(unsigned long nbytes)
 {
-  return malloc(nbytes);
+  // The kernel pre-maps the user work queue at the fixed address USERWQ,
+  // so this can be handed out at most once.
+  static bool alloced;
+  if (alloced)
+    die("allocwq: allocing more than once");
+  alloced = true;
+  return (void*)USERWQ;
 }
 
 static inline void
......
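Since the kernel now pre-maps the queue at a fixed address, allocwq() no longer allocates: it hands out the USERWQ mapping exactly once and dies on a second request. The same one-shot pattern in isolation (a sketch; alloc_fixed_once and its parameters are illustrative, not part of the commit):

#include <cassert>
#include <cstdint>

// Hand out a fixed, pre-mapped region of 'mapped' bytes once; refuse
// any further call, mirroring allocwq() above.
static void*
alloc_fixed_once(uintptr_t base, unsigned long nbytes, unsigned long mapped)
{
  static bool alloced;
  assert(!alloced && "region already handed out");
  assert(nbytes <= mapped && "request exceeds the pre-mapped region");
  alloced = true;
  return reinterpret_cast<void*>(base);
}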
......@@ -10,6 +10,7 @@
#include "condvar.h"
#include "proc.hh"
#include "vm.hh"
#include "wq.hh"
using namespace std;
......@@ -92,13 +93,24 @@ setupkvm(void)
}
 int
-setupkshared(pgmap *pml4, char *kshared)
+setupuvm(pgmap *pml4, char *kshared, char *uwq)
 {
-  for (u64 off = 0; off < KSHAREDSIZE; off+=4096) {
-    atomic<pme_t> *pte = walkpgdir(pml4, (u64) (KSHARED+off), 1);
-    if (pte == nullptr)
-      panic("setupkshared: oops");
-    *pte = v2p(kshared+off) | PTE_P | PTE_U | PTE_W;
+  struct todo {
+    char *kvm;
+    char *uvm;
+    size_t size;
+  } todo[] = {
+    { kshared, (char*)KSHARED, KSHAREDSIZE },
+    { uwq,     (char*)USERWQ,  PGROUNDUP(wq_size()) }
+  };
+
+  for (int i = 0; i < NELEM(todo); i++) {
+    for (u64 off = 0; off < todo[i].size; off+=4096) {
+      atomic<pme_t> *pte = walkpgdir(pml4, (u64) (todo[i].uvm+off), 1);
+      if (pte == nullptr)
+        return -1;
+      *pte = v2p(todo[i].kvm+off) | PTE_P | PTE_U | PTE_W;
+    }
   }
   return 0;
 }
......
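setupuvm() generalizes the old setupkshared(): it walks a table of (kernel buffer, fixed user address, size) triples and installs a PTE per 4 KB page, so both the kshared region and the per-process work-queue buffer become kernel memory mapped writable into user space. It also returns -1 on a failed walkpgdir() instead of panicking, letting the vmap constructor clean up. Each entry reduces to the same composition, shown here as a hypothetical helper:

// Present, user-accessible, writable, backed by the kernel page at kva.
static inline pme_t
user_shared_pte(void *kva)
{
  return v2p(kva) | PTE_P | PTE_U | PTE_W;
}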
......@@ -278,6 +278,10 @@ initkalloc(u64 mbaddr)
   slabmem[slab_wq][c].size = PGROUNDUP(wq_size());
   slabmem[slab_wq][c].ninit = NCPU;
 
+  strncpy(slabmem[slab_userwq][c].name, " uwq", MAXNAME);
+  slabmem[slab_userwq][c].size = PGROUNDUP(wq_size());
+  slabmem[slab_userwq][c].ninit = CPUKSTACKS;
+
   for (int i = 0; i < slab_type_max; i++) {
     slabmem[i][c].name[0] = (char) c + '0';
     slabinit(&slabmem[i][c], &p, &k);
......
......@@ -140,13 +140,13 @@ vma::~vma()
 vmap::vmap() :
 #if VM_CRANGE
-    cr(10),
+  cr(10),
 #endif
 #if VM_RADIX
-    rx(PGSHIFT),
+  rx(PGSHIFT),
 #endif
-    ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared)),
-    brk_(0)
+  ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared)),
+  brk_(0), uwq_((wq*) ksalloc(slab_userwq))
 {
   initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
   if (pml4 == 0) {
......@@ -159,7 +159,12 @@ vmap::vmap() :
     goto err;
   }
 
-  if (setupkshared(pml4, kshared)) {
+  if (uwq_ == nullptr) {
+    cprintf("vmap::vmap: userwq out of memory\n");
+    goto err;
+  }
+
+  if (setupuvm(pml4, kshared, (char*)uwq_)) {
     cprintf("vmap::vmap: setupkshared out of memory\n");
     goto err;
   }
......@@ -169,6 +174,8 @@ vmap::vmap() :
 err:
   if (kshared)
     ksfree(slab_kshared, kshared);
+  if (uwq_)
+    ksfree(slab_userwq, uwq_);
   if (pml4)
     freevm(pml4);
 }
......@@ -177,6 +184,8 @@ vmap::~vmap()
 {
   if (kshared)
     ksfree(slab_kshared, kshared);
+  if (uwq_)
+    ksfree(slab_userwq, uwq_);
   if (pml4)
     freevm(pml4);
 }
......
......@@ -55,7 +55,7 @@ pthread_getspecific(pthread_key_t key)
 int
 pthread_setspecific(pthread_key_t key, void* value)
 {
-  __asm volatile("movq %0, %%fs:(%1)" : : "r" (value), "r" ((u64) key * 8));
+  __asm volatile("movq %0, %%fs:(%1)" : : "r" (value), "r" ((u64) key * 8) : "memory");
   return 0;
 }
......
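The only change is the added "memory" clobber. The asm stores value into an fs-relative TLS slot, but with an empty output list and no clobber the compiler may assume no memory was written, so it can reorder the store against surrounding accesses or keep a cached pthread_getspecific() result live across it. The clobber makes the asm act as a compiler-level memory barrier. A self-contained illustration of the pattern (tls_store is a hypothetical name; the offset mirrors key * 8 above):

#include <cstdint>

// Store 'value' at %fs:offset. The "memory" clobber keeps the compiler
// from caching or reordering memory accesses across a store it cannot see.
static inline void
tls_store(uint64_t offset, void *value)
{
  __asm volatile("movq %0, %%fs:(%1)"
                 : /* no outputs */
                 : "r" (value), "r" (offset)
                 : "memory");
}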