Commit: The plumbing to control the number of userspace workers

Parent commit: c4dea1b7
#pragma once
// A single volatile u64 counter, aligned (__mpalign__) and padded out
// (__padout__) by project macros — presumably so each counter occupies
// its own cache line; confirm against the macro definitions.
// NOTE: the scraped diff showed the member twice (old line with a stray
// ';;'); collapsed to the single post-commit declaration.
struct padded_length {
  volatile u64 v_ __mpalign__;
  __padout__;
};
// Compile-time max number of workers
#define NWORKERS (NCPU-1)
// Kernel/user IPC buffer: mapped into user space at USERWQ (see
// allocipc()/mapkva below) so kernel and userspace work-queue code can
// share worker limits and per-queue length counters.
struct uwq_ipcbuf {
// Run-time max number of workers; must not exceed the compile-time
// NWORKERS bound (asserted where the buffer is initialized).
u64 maxworkers __mpalign__;
// Per-queue pending-work counters, one cache-line-padded slot each.
padded_length len[NWORKERS]__mpalign__;
};
#if defined (XV6_KERNEL)
// Kernel entry point: attempt to pick up queued user work; declaration
// only here — semantics live with the definition.
// (Removed a second, identical "#define NWORKERS" that duplicated the
// definition already made above with the compile-time-max comment.)
bool uwq_trywork(void);
struct uwq;
struct uwq_worker {
......@@ -40,7 +47,7 @@ protected:
virtual void onzero() const;
private:
uwq(vmap* vmap, filetable* ftable, padded_length *len);
uwq(vmap* vmap, filetable* ftable, uwq_ipcbuf *ipc);
~uwq();
uwq& operator=(const uwq&);
uwq(const uwq& x);
......@@ -51,7 +58,7 @@ private:
struct spinlock lock_;
vmap* vmap_;
filetable* ftable_;
padded_length* len_;
uwq_ipcbuf* ipc_;
uptr uentry_;
uptr ustack_;
std::atomic<u64> uref_;
......
......@@ -38,11 +38,12 @@ struct cwork : public work {
#elif defined(XV6_KERNEL)
// Kernel builds allocate work items from the kernel malloc pool.
#define xallocwork(n) kmalloc(n, "xallocwork")
#define xfreework(p, sz) kmfree(p, sz)
#else // xv6 user
// User builds route through the user-level work-queue allocator.
// (The stripped diff left two consecutive '#else' lines here — a
// preprocessor error; collapsed to the single post-commit '#else'.)
extern void* wqalloc(unsigned long nbytes);
extern void wqfree(void *ptr);
#define xallocwork(n) wqalloc(n)
#define xfreework(n, sz) wqfree(n)
// Run-time cap on userspace workers, defined in the user wq runtime.
extern u64 wq_maxworkers;
#endif
#include "wqfor.hh"
......@@ -28,16 +28,16 @@ allocwq(unsigned long nbytes)
return malloc(nbytes);
}
static inline padded_length*
allocklen(unsigned long nbytes)
static inline uwq_ipcbuf*
allocipc(void)
{
static bool alloced;
if (alloced)
die("allocklen: allocing more than once");
if (nbytes > USERWQSIZE)
die("allocklen: too large");
if (sizeof(uwq_ipcbuf) > USERWQSIZE)
die("allocipc: too large");
alloced = true;
return (padded_length*)USERWQ;
return (uwq_ipcbuf*)USERWQ;
}
static inline void
......@@ -95,3 +95,5 @@ wqarch_init(void)
// User-space shims: printf-based output, and no-op interrupt guards
// (pushcli/popcli only mean something in the kernel).
#define xprintf printf
#define pushcli()
#define popcli()
// Run-time worker cap; defaults to the compile-time maximum NWORKERS.
// wq's constructor copies this into uwq_ipcbuf::maxworkers after
// asserting it does not exceed NWORKERS.
u64 wq_maxworkers = NWORKERS;
......@@ -104,28 +104,28 @@ uwq_worker::wait(void)
uwq*
uwq::alloc(vmap* vmap, filetable *ftable)
{
padded_length* len;
uwq_ipcbuf* ipc;
uwq* u;
len = (padded_length*) ksalloc(slab_userwq);
if (len == nullptr)
ipc = (uwq_ipcbuf*) ksalloc(slab_userwq);
if (ipc == nullptr)
return nullptr;
ftable->incref();
vmap->incref();
u = new uwq(vmap, ftable, len);
u = new uwq(vmap, ftable, ipc);
if (u == nullptr) {
ftable->decref();
vmap->decref();
ksfree(slab_userwq, len);
ksfree(slab_userwq, ipc);
return nullptr;
}
if (mapkva(vmap->pml4, (char*)len, USERWQ, USERWQSIZE)) {
if (mapkva(vmap->pml4, (char*)ipc, USERWQ, USERWQSIZE)) {
ftable->decref();
vmap->decref();
ksfree(slab_userwq, len);
ksfree(slab_userwq, ipc);
u->dec();
return nullptr;
}
......@@ -133,13 +133,13 @@ uwq::alloc(vmap* vmap, filetable *ftable)
return u;
}
uwq::uwq(vmap* vmap, filetable *ftable, padded_length *len)
uwq::uwq(vmap* vmap, filetable* ftable, uwq_ipcbuf* ipc)
: rcu_freed("uwq"),
vmap_(vmap), ftable_(ftable), len_(len),
vmap_(vmap), ftable_(ftable), ipc_(ipc),
uentry_(0), ustack_(UWQSTACK), uref_(0)
{
for (int i = 0; i < NCPU; i++)
len_[i].v_ = 0;
ipc_->len[i].v_ = 0;
initlock(&lock_, "uwq_lock", 0);
memset(worker_, 0, sizeof(worker_));
......@@ -147,8 +147,8 @@ uwq::uwq(vmap* vmap, filetable *ftable, padded_length *len)
// Tear down the user work queue: free the kernel-side IPC buffer back to
// its slab and drop the vmap/filetable references taken at allocation.
// (The stripped diff showed both the old 'len_' and new 'ipc_' cleanup
// branches; only the post-commit 'ipc_' branch is kept — 'len_' is no
// longer a member.)
uwq::~uwq(void)
{
  if (ipc_ != nullptr)
    ksfree(slab_userwq, ipc_);
  vmap_->decref();
  ftable_->decref();
}
......@@ -156,11 +156,11 @@ uwq::~uwq(void)
bool
uwq::haswork(void) const
{
if (len_ == nullptr)
if (ipc_ == nullptr)
return false;
for (int i = 0; i < NCPU; i++) {
if (len_[i].v_ > 0) {
if (ipc_->len[i].v_ > 0) {
return true;
}
}
......@@ -201,7 +201,7 @@ uwq::tryworker(void)
}
}
if (slot != -1) {
if (slot != -1 && uref_ < ipc_->maxworkers) {
proc* p = allocworker();
if (p != nullptr) {
uwq_worker* w = new uwq_worker(this, p);
......
......@@ -19,7 +19,7 @@
// Compile-time VM feature switches (0 = off, 1 = on).
// The stripped diff listed tlb_lazy twice (old =1, new =0) — an enum
// redefinition error; the commit disables lazy TLB invalidation, so the
// post-commit value 0 is kept.
enum { vm_debug = 0 };
enum { tlb_shootdown = 1 };
enum { tlb_lazy = 0 };
/*
* vmnode
......
......@@ -42,7 +42,7 @@ private:
percpu<stat> stat_;
#if defined(XV6_USER)
padded_length* len_;
uwq_ipcbuf* ipc_;
#endif
};
......@@ -103,7 +103,9 @@ wq::wq(void)
wqlock_init(&q_[i].lock);
#if defined(XV6_USER)
len_ = allocklen(NCPU*sizeof(padded_length));
ipc_ = allocipc();
assert(wq_maxworkers <= NWORKERS);
ipc_->maxworkers = wq_maxworkers;
#endif
}
......@@ -121,7 +123,7 @@ inline void
wq::inclen(int c)
{
#if defined(XV6_USER)
__sync_fetch_and_add(&len_[c].v_, 1);
__sync_fetch_and_add(&ipc_->len[c].v_, 1);
#endif
}
......@@ -129,7 +131,7 @@ inline void
wq::declen(int c)
{
#if defined(XV6_USER)
__sync_fetch_and_sub(&len_[c].v_, 1);
__sync_fetch_and_sub(&ipc_->len[c].v_, 1);
#endif
}
......
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment.