More rejiggering to support userspace wq from the kernel.

...also, it turns out vmap->decref() in ~proc is not what I wanted.
parent 7e84b1c2
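For context on the lifetime change in this diff: once uwq_trywork() can chase another CPU's p->vmap without holding a lock, freeing the vmap synchronously from ~proc (or inline from the last decref()) is unsafe, so the final decref() now hands the object to the garbage collector via gc_delayed() and the actual delete happens in do_gc() after a grace period. Below is a minimal sketch of that refcount-plus-deferred-free pattern, using stand-in definitions; the real rcu_freed and gc_delayed live elsewhere in this tree, and this skeleton only shows the control flow.

#include <atomic>

// Stand-in for the tree's rcu_freed base: an object that must not be
// reclaimed while lock-free readers may still hold a pointer to it.
struct rcu_freed {
  const char *name_;
  explicit rcu_freed(const char *name) : name_(name) {}
  virtual ~rcu_freed() {}
  virtual void do_gc() = 0;   // runs once it is safe to reclaim
};

// Stand-in for gc_delayed(): the real one queues the object and calls
// do_gc() only after every CPU has passed a quiescent point.
static void gc_delayed(rcu_freed *o) { o->do_gc(); }

struct vmap_sketch : public rcu_freed {
  std::atomic<int> ref;
  vmap_sketch() : rcu_freed("vm"), ref(1) {}
  void incref() { ++ref; }
  void decref() {
    if (--ref == 0)
      gc_delayed(this);       // defer the delete instead of doing it inline
  }
  void do_gc() override { delete this; }
};

With this shape, finishproc() drops the reference during teardown, and a racing reader that already fetched p->vmap keeps a valid pointer until the grace period expires.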
 #pragma once

 struct padded_length {
   volatile u64 v_ __mpalign__;
   __padout__;
 };

 #if defined (XV6_KERNEL)
-struct uwq_length {
-  uwq_length(padded_length *len) : len_(len) {}
-
-  void clear() {
-    for (int i = 0; i < NCPU; i++)
-      len_[i].v_ = 0;
-  }
-
-  bool haswork() {
-    for (int i = 0; i < NCPU; i++) {
-      if (len_[i].v_ > 0)
-        return true;
-    }
-    return false;
-  }
-};
+struct uwq {
+  uwq(padded_length *len);
+  ~uwq();
+
+  bool haswork();
+  void* buffer();
+
+private:
+  uwq(const uwq& x);
+
+  padded_length* len_;
+};
......
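The header above pins down the kernel/user contract: one cache-line-padded counter per CPU, allocated from slab_userwq and mapped into user space (the setupuvm() call in the vmap constructor below), with userspace bumping its CPU's counter as it queues work and uwq::haswork() scanning for any nonzero slot. Here is a self-contained sketch of that poll; the constants (NCPU, the cache-line width) and the announce_work() helper are illustrative placeholders, not the kernel's definitions.

#include <cstdint>
#include <cstdio>

typedef uint64_t u64;
enum { NCPU = 4, CACHELINE = 64 };   // illustrative, not the kernel's values

// Mirrors padded_length: each CPU owns one counter, padded out to a full
// cache line so producers on different CPUs never share a line.
struct padded_length {
  volatile u64 v_;
  char pad_[CACHELINE - sizeof(u64)];
};

static padded_length len[NCPU];

// Userspace side (hypothetical helper): announce one queued item.
void announce_work(int cpu) { len[cpu].v_++; }

// Kernel side: the scan uwq::haswork() performs over the shared buffer.
bool haswork() {
  for (int i = 0; i < NCPU; i++)
    if (len[i].v_ > 0)
      return true;
  return false;
}

int main() {
  printf("%d\n", haswork());   // 0: nothing queued
  announce_work(2);
  printf("%d\n", haswork());   // 1: CPU 2 announced work
}

The padding is the point of the struct: each CPU writes only its own slot, and without the pad adjacent counters would share a cache line and bounce between cores on every enqueue.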
@@ -5,6 +5,7 @@
 #include "radix.hh"
 #include "cpputil.hh"
 #include "hwvm.hh"
+#include "uwq.hh"

 #define VM_CRANGE 1
 #define VM_RADIX 0
@@ -62,7 +63,7 @@ struct vma

 // An address space: a set of vmas plus h/w page table.
 // The elements of e[] are not ordered by address.
-struct vmap {
+struct vmap : public rcu_freed {
 #if VM_CRANGE
   struct crange cr;
 #endif
@@ -90,10 +91,11 @@ struct vmap
   int copyout(uptr va, void *p, u64 len);
   int sbrk(ssize_t n, uptr *addr);

+  virtual void do_gc(void) { delete this; }
   NEW_DELETE_OPS(vmap)

   uptr brk_;                 // Top of heap
-  padded_length* const uwq_len_;
+  uwq uwq_;

 private:
   int pagefault_wcow(vma *m);
......
@@ -118,7 +118,8 @@ idleloop(void)
       idlem->heir = p;
     }

-    // XXX(sbw) worked = uwq_trywork();
+    // XXX(sbw)
+    worked = uwq_trywork();
     worked = wq_trywork();

     // If we are no longer the idle thread, exit
......
@@ -55,9 +55,6 @@ proc::proc(int npid) :

 proc::~proc(void)
 {
-  if (vmap != nullptr)
-    vmap->decref();
-
   destroylock(&lock);
   destroycondvar(&cv);
 }
@@ -414,6 +411,8 @@ fork(int flags)
 void
 finishproc(struct proc *p)
 {
+  if (p->vmap != nullptr)
+    p->vmap->decref();
   ksfree(slab_stack, p->kstack);
   p->kstack = 0;
   if (!xnspid->remove(p->pid, &p))
......
@@ -9,6 +9,7 @@
 #include "condvar.h"
 #include "proc.hh"
 #include "vm.hh"
+#include "kalloc.hh"

 int
 uwq_trywork(void)
@@ -28,12 +29,9 @@ uwq_trywork(void)
     struct proc *p = c->proc;
     if (p == nullptr || p->vmap == nullptr)
       continue;
-    padded_length *len = p->vmap->uwq_len_;
+    uwq* uwq = &p->vmap->uwq_;
-    if (len == nullptr)
-      break;
-
-    if (uwq_length(len).haswork()) {
+    if (uwq->haswork()) {
       // XXX(sbw) start a worker thread..
       break;
     }
@@ -41,3 +39,43 @@ uwq_trywork(void)

   return 0;
 }
+
+//
+// uwq
+//
+uwq::uwq(padded_length *len)
+  : len_(len)
+{
+  if (len_ != nullptr) {
+    for (int i = 0; i < NCPU; i++)
+      len_[i].v_ = 0;
+  } else {
+    cprintf("uwq::uwq: nullptr len\n");
+  }
+}
+
+uwq::~uwq(void)
+{
+  if (len_ != nullptr)
+    ksfree(slab_userwq, len_);
+}
+
+bool
+uwq::haswork(void)
+{
+  if (len_ == nullptr)
+    return false;
+
+  for (int i = 0; i < NCPU; i++) {
+    if (len_[i].v_ > 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void*
+uwq::buffer(void)
+{
+  return (void*)len_;
+}
@@ -139,7 +139,7 @@ vma::~vma()
 /*
  * vmap
  */
-vmap::vmap() :
+vmap::vmap() : rcu_freed("vm"),
 #if VM_CRANGE
   cr(10),
 #endif
@@ -147,7 +147,7 @@ vmap::vmap() :
   rx(PGSHIFT),
 #endif
   ref(1), pml4(setupkvm()), kshared((char*) ksalloc(slab_kshared)),
-  brk_(0), uwq_len_((padded_length*) ksalloc(slab_userwq))
+  brk_(0), uwq_((padded_length*) ksalloc(slab_userwq))
 {
   initlock(&brklock_, "brk_lock", LOCKSTAT_VM);
   if (pml4 == 0) {
@@ -160,14 +160,12 @@ vmap::vmap() :
     goto err;
   }

-  if (uwq_len_ == nullptr) {
+  if (uwq_.buffer() == nullptr) {
     cprintf("vmap::vmap: userwq out of memory\n");
     goto err;
-  } else {
-    uwq_length(uwq_len_).clear();
   }

-  if (setupuvm(pml4, kshared, (char*)uwq_len_)) {
+  if (setupuvm(pml4, kshared, (char*)uwq_.buffer())) {
     cprintf("vmap::vmap: setupkshared out of memory\n");
     goto err;
   }
@@ -177,8 +175,6 @@ vmap::vmap() :
 err:
   if (kshared)
     ksfree(slab_kshared, kshared);
-  if (uwq_len_)
-    ksfree(slab_userwq, uwq_len_);
   if (pml4)
     freevm(pml4);
 }
@@ -187,8 +183,6 @@ vmap::~vmap()
 {
   if (kshared)
     ksfree(slab_kshared, kshared);
-  if (uwq_len_)
-    ksfree(slab_userwq, uwq_len_);
   if (pml4)
     freevm(pml4);
   destroylock(&brklock_);
@@ -198,7 +192,7 @@ void
 vmap::decref()
 {
   if (--ref == 0)
-    delete this;
+    gc_delayed(this);
 }

 bool
......