The plumbing to control number of userspace workers

Parent commit: c4dea1b7
#pragma once #pragma once
struct padded_length { struct padded_length {
volatile u64 v_ __mpalign__;; volatile u64 v_ __mpalign__;
__padout__; __padout__;
}; };
// Compile-time max number of workers
#define NWORKERS (NCPU-1)
// Kernel/user shared IPC buffer for the user-level work queue.  The kernel
// allocates it (ksalloc) and maps it at the fixed user address USERWQ, so
// user-space workers can read the per-cpu queue lengths without a syscall.
struct uwq_ipcbuf {
// Run-time max number of workers
u64 maxworkers __mpalign__;
// Per-cpu published queue lengths; one padded slot per possible worker.
// NOTE: the array has NWORKERS (= NCPU-1) entries, not NCPU.
padded_length len[NWORKERS]__mpalign__;
};
#if defined (XV6_KERNEL) #if defined (XV6_KERNEL)
bool uwq_trywork(void); bool uwq_trywork(void);
#define NWORKERS (NCPU-1)
struct uwq; struct uwq;
struct uwq_worker { struct uwq_worker {
...@@ -40,7 +47,7 @@ protected: ...@@ -40,7 +47,7 @@ protected:
virtual void onzero() const; virtual void onzero() const;
private: private:
uwq(vmap* vmap, filetable* ftable, padded_length *len); uwq(vmap* vmap, filetable* ftable, uwq_ipcbuf *ipc);
~uwq(); ~uwq();
uwq& operator=(const uwq&); uwq& operator=(const uwq&);
uwq(const uwq& x); uwq(const uwq& x);
...@@ -51,7 +58,7 @@ private: ...@@ -51,7 +58,7 @@ private:
struct spinlock lock_; struct spinlock lock_;
vmap* vmap_; vmap* vmap_;
filetable* ftable_; filetable* ftable_;
padded_length* len_; uwq_ipcbuf* ipc_;
uptr uentry_; uptr uentry_;
uptr ustack_; uptr ustack_;
std::atomic<u64> uref_; std::atomic<u64> uref_;
......
...@@ -38,11 +38,12 @@ struct cwork : public work { ...@@ -38,11 +38,12 @@ struct cwork : public work {
#elif defined(XV6_KERNEL) #elif defined(XV6_KERNEL)
#define xallocwork(n) kmalloc(n, "xallocwork") #define xallocwork(n) kmalloc(n, "xallocwork")
#define xfreework(p, sz) kmfree(p, sz) #define xfreework(p, sz) kmfree(p, sz)
#else #else // xv6 user
extern void* wqalloc(unsigned long nbytes); extern void* wqalloc(unsigned long nbytes);
extern void wqfree(void *ptr); extern void wqfree(void *ptr);
#define xallocwork(n) wqalloc(n) #define xallocwork(n) wqalloc(n)
#define xfreework(n, sz) wqfree(n) #define xfreework(n, sz) wqfree(n)
extern u64 wq_maxworkers;
#endif #endif
#include "wqfor.hh" #include "wqfor.hh"
...@@ -28,16 +28,16 @@ allocwq(unsigned long nbytes) ...@@ -28,16 +28,16 @@ allocwq(unsigned long nbytes)
return malloc(nbytes); return malloc(nbytes);
} }
static inline padded_length* static inline uwq_ipcbuf*
allocklen(unsigned long nbytes) allocipc(void)
{ {
static bool alloced; static bool alloced;
if (alloced) if (alloced)
die("allocklen: allocing more than once"); die("allocklen: allocing more than once");
if (nbytes > USERWQSIZE) if (sizeof(uwq_ipcbuf) > USERWQSIZE)
die("allocklen: too large"); die("allocipc: too large");
alloced = true; alloced = true;
return (padded_length*)USERWQ; return (uwq_ipcbuf*)USERWQ;
} }
static inline void static inline void
...@@ -95,3 +95,5 @@ wqarch_init(void) ...@@ -95,3 +95,5 @@ wqarch_init(void)
#define xprintf printf #define xprintf printf
#define pushcli() #define pushcli()
#define popcli() #define popcli()
u64 wq_maxworkers = NWORKERS;
...@@ -104,28 +104,28 @@ uwq_worker::wait(void) ...@@ -104,28 +104,28 @@ uwq_worker::wait(void)
// Allocate a uwq for the given address space and file table: grab an IPC
// page from the slab, take references on vmap/ftable, construct the uwq,
// and map the IPC page into the user address space at USERWQ.
// Returns nullptr on any failure, undoing prior steps.
uwq* uwq*
uwq::alloc(vmap* vmap, filetable *ftable) uwq::alloc(vmap* vmap, filetable *ftable)
{ {
padded_length* len; uwq_ipcbuf* ipc;
uwq* u; uwq* u;
len = (padded_length*) ksalloc(slab_userwq); ipc = (uwq_ipcbuf*) ksalloc(slab_userwq);
if (len == nullptr) if (ipc == nullptr)
return nullptr; return nullptr;
// References taken up-front; every failure path below must drop them.
ftable->incref(); ftable->incref();
vmap->incref(); vmap->incref();
u = new uwq(vmap, ftable, len); u = new uwq(vmap, ftable, ipc);
if (u == nullptr) { if (u == nullptr) {
ftable->decref(); ftable->decref();
vmap->decref(); vmap->decref();
ksfree(slab_userwq, len); ksfree(slab_userwq, ipc);
return nullptr; return nullptr;
} }
if (mapkva(vmap->pml4, (char*)len, USERWQ, USERWQSIZE)) { if (mapkva(vmap->pml4, (char*)ipc, USERWQ, USERWQSIZE)) {
// NOTE(review): this path frees ipc and drops both refs explicitly AND
// calls u->dec(); if dec() reaches zero and runs ~uwq(), the destructor
// frees ipc_ and drops the refs again — looks like a double release.
// Confirm dec()/onzero() semantics before relying on this path.
ftable->decref(); ftable->decref();
vmap->decref(); vmap->decref();
ksfree(slab_userwq, len); ksfree(slab_userwq, ipc);
u->dec(); u->dec();
return nullptr; return nullptr;
} }
...@@ -133,13 +133,13 @@ uwq::alloc(vmap* vmap, filetable *ftable) ...@@ -133,13 +133,13 @@ uwq::alloc(vmap* vmap, filetable *ftable)
return u; return u;
} }
// Construct a uwq over an already-referenced vmap/ftable and a
// kernel-allocated IPC buffer; zeroes the published queue lengths.
uwq::uwq(vmap* vmap, filetable *ftable, padded_length *len) uwq::uwq(vmap* vmap, filetable* ftable, uwq_ipcbuf* ipc)
: rcu_freed("uwq"), : rcu_freed("uwq"),
vmap_(vmap), ftable_(ftable), len_(len), vmap_(vmap), ftable_(ftable), ipc_(ipc),
uentry_(0), ustack_(UWQSTACK), uref_(0) uentry_(0), ustack_(UWQSTACK), uref_(0)
{ {
// BUG: loop runs NCPU times, but ipc_->len has only NWORKERS = NCPU-1
// entries (see uwq_ipcbuf), so the last iteration writes one element
// past the array. The bound should be NWORKERS.
for (int i = 0; i < NCPU; i++) for (int i = 0; i < NCPU; i++)
len_[i].v_ = 0; ipc_->len[i].v_ = 0;
initlock(&lock_, "uwq_lock", 0); initlock(&lock_, "uwq_lock", 0);
memset(worker_, 0, sizeof(worker_)); memset(worker_, 0, sizeof(worker_));
...@@ -147,8 +147,8 @@ uwq::uwq(vmap* vmap, filetable *ftable, padded_length *len) ...@@ -147,8 +147,8 @@ uwq::uwq(vmap* vmap, filetable *ftable, padded_length *len)
// Release the uwq's resources: the kernel-allocated IPC buffer (if any)
// and the references on the address space and file table taken in alloc().
uwq::~uwq(void)
{
  if (ipc_ != nullptr)
    ksfree(slab_userwq, ipc_);
  vmap_->decref();
  ftable_->decref();
}
...@@ -156,11 +156,11 @@ uwq::~uwq(void) ...@@ -156,11 +156,11 @@ uwq::~uwq(void)
// True if any per-cpu queue advertises pending work in the IPC buffer.
bool bool
uwq::haswork(void) const uwq::haswork(void) const
{ {
if (len_ == nullptr) if (ipc_ == nullptr)
return false; return false;
// BUG: scans NCPU entries, but ipc_->len has only NWORKERS = NCPU-1
// entries (see uwq_ipcbuf) — the last iteration reads one element past
// the array. The bound should be NWORKERS.
for (int i = 0; i < NCPU; i++) { for (int i = 0; i < NCPU; i++) {
if (len_[i].v_ > 0) { if (ipc_->len[i].v_ > 0) {
return true; return true;
} }
} }
...@@ -201,7 +201,7 @@ uwq::tryworker(void) ...@@ -201,7 +201,7 @@ uwq::tryworker(void)
} }
} }
if (slot != -1) { if (slot != -1 && uref_ < ipc_->maxworkers) {
proc* p = allocworker(); proc* p = allocworker();
if (p != nullptr) { if (p != nullptr) {
uwq_worker* w = new uwq_worker(this, p); uwq_worker* w = new uwq_worker(this, p);
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
enum { vm_debug = 0 }; enum { vm_debug = 0 };
enum { tlb_shootdown = 1 }; enum { tlb_shootdown = 1 };
enum { tlb_lazy = 1 }; enum { tlb_lazy = 0 };
/* /*
* vmnode * vmnode
......
...@@ -42,7 +42,7 @@ private: ...@@ -42,7 +42,7 @@ private:
percpu<stat> stat_; percpu<stat> stat_;
#if defined(XV6_USER) #if defined(XV6_USER)
padded_length* len_; uwq_ipcbuf* ipc_;
#endif #endif
}; };
...@@ -103,7 +103,9 @@ wq::wq(void) ...@@ -103,7 +103,9 @@ wq::wq(void)
wqlock_init(&q_[i].lock); wqlock_init(&q_[i].lock);
#if defined(XV6_USER) #if defined(XV6_USER)
len_ = allocklen(NCPU*sizeof(padded_length)); ipc_ = allocipc();
assert(wq_maxworkers <= NWORKERS);
ipc_->maxworkers = wq_maxworkers;
#endif #endif
} }
...@@ -121,7 +123,7 @@ inline void ...@@ -121,7 +123,7 @@ inline void
wq::inclen(int c) wq::inclen(int c)
{ {
#if defined(XV6_USER) #if defined(XV6_USER)
__sync_fetch_and_add(&len_[c].v_, 1); __sync_fetch_and_add(&ipc_->len[c].v_, 1);
#endif #endif
} }
...@@ -129,7 +131,7 @@ inline void ...@@ -129,7 +131,7 @@ inline void
wq::declen(int c) wq::declen(int c)
{ {
#if defined(XV6_USER) #if defined(XV6_USER)
__sync_fetch_and_sub(&len_[c].v_, 1); __sync_fetch_and_sub(&ipc_->len[c].v_, 1);
#endif #endif
} }
......
You added 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment.