提交 ba71a527 创建,作者:Nickolai Zeldovich

start trying to handle resource exhaustion

上级 644017a3
......@@ -3,7 +3,7 @@
#include "user.h"
#define NCHILD 2
#define NDEPTH 5
#define NDEPTH 12
void
forktree(int depth)
......
......@@ -45,6 +45,8 @@ class scoped_acquire {
void acquire(spinlock *l) { assert(!_l); ::acquire(l); _l = l; }
};
// Marker base class for exceptions that signal a transient, retryable
// failure (e.g. resource exhaustion). The syscall dispatcher and the
// page-fault path catch `retryable&`, yield/log, and re-run the operation.
class retryable {};
namespace std {
template<class T>
struct remove_reference
......@@ -113,6 +115,8 @@ namespace std {
s.next_width = sw._n;
return s;
}
// Kernel replacement for std::bad_alloc. It derives from `retryable` so an
// out-of-memory condition — thrown by the NEW_DELETE_OPS operator new when
// kmalloc returns null — is caught at the syscall/pagefault boundary and
// retried instead of being treated as fatal.
class bad_alloc : public retryable {};
}
/* C++ runtime */
......@@ -133,7 +137,10 @@ extern void *__dso_handle;
#define NEW_DELETE_OPS(classname) \
static void* operator new(unsigned long nbytes) { \
assert(nbytes == sizeof(classname)); \
return kmalloc(sizeof(classname), #classname); \
void* p = kmalloc(sizeof(classname), #classname); \
if (p == nullptr) \
throw std::bad_alloc(); \
return p; \
} \
\
static void* operator new(unsigned long nbytes, classname *buf) { \
......
#pragma once
#define E_RETRY -3
......@@ -211,4 +211,14 @@ initcpprt(void)
{
// Register the kernel's .eh_frame unwind tables with the unwinder so C++
// exceptions can propagate through kernel code.
extern u8 __EH_FRAME_BEGIN__[];
__register_frame(__EH_FRAME_BEGIN__);
// Initialize lazy exception handling data structures
// (throw and catch a dummy int once at boot — presumably so the first
// real throw finds the unwinder state already set up; verify).
try {
throw 5;
} catch (int& x) {
assert(x == 5);
return;
}
// Falling through means the throw above was never caught:
// exception unwinding is broken, and later throws would be fatal anyway.
panic("no catch");
}
......@@ -205,7 +205,7 @@ kalloc_pool(struct kmem *km, const char *name)
if (r == 0) {
cprintf("kalloc: out of memory in pool %s\n", km->name);
kmemprint();
// kmemprint();
return 0;
}
......
......@@ -59,6 +59,8 @@ proc::proc(int npid) :
// Destructor: release per-process resources. Return the kernel stack (if
// one was allocated) to the stack slab, then tear down the process's
// spinlock and condition variable. Order matters: the stack is freed
// before the lock protecting the proc is destroyed.
proc::~proc(void)
{
if (kstack)
ksfree(slab_stack, kstack);
destroylock(&lock);
destroycondvar(&cv);
}
......@@ -350,7 +352,9 @@ fork(int flags)
// Allocate process.
if((np = proc::alloc()) == 0)
return -1;
throw std::bad_alloc();
// XXX use a scoped cleanup handler to do xnspid->remove & freeproc
if(flags & FORK_SHARE_VMAP) {
np->vmap = myproc()->vmap;
......@@ -358,12 +362,10 @@ fork(int flags)
} else {
// Copy process state from p.
if((np->vmap = myproc()->vmap->copy(cow)) == 0){
ksfree(slab_stack, np->kstack);
np->kstack = 0;
if (!xnspid->remove(np->pid, &np))
panic("fork: ns_remove");
freeproc(np);
return -1;
throw std::bad_alloc();
}
}
......@@ -384,7 +386,7 @@ fork(int flags)
if (np->ftable == nullptr) {
// XXX(sbw) leaking?
freeproc(np);
return -1;
throw std::bad_alloc();
}
}
......
......@@ -9,7 +9,6 @@
#include "syscall.h"
#include "cpu.hh"
#include "kmtrace.hh"
#include "errno.h"
extern "C" int __uaccess_mem(void* dst, const void* src, u64 size);
extern "C" int __uaccess_str(char* dst, const char* src, u64 size);
......@@ -81,23 +80,25 @@ argcheckptr(void *p, int size)
// Top-level system call dispatcher: called from the trap path with the six
// raw argument registers and the syscall number in `num`.
//
// NOTE(review): this span is a rendered diff with the +/- markers stripped;
// it appears to concatenate the PRE-change body (the `retry:` label through
// the first `return r;`, using the E_RETRY sentinel) with the POST-change
// body (the for(;;) try/catch loop). As flat text the for(;;) loop is
// unreachable dead code — confirm against the actual post-commit file.
u64
syscall(u64 a0, u64 a1, u64 a2, u64 a3, u64 a4, u64 num)
{
u64 r;
// mtrace annotation scoping the whole syscall invocation.
mt_ascope ascope("syscall(%lx,%lx,%lx,%lx,%lx,%lx)", num, a0, a1, a2, a3, a4);
retry:
if(num < SYS_ncount && syscalls[num]) {
mtstart(syscalls[num], myproc());
mtrec();
r = syscalls[num](a0, a1, a2, a3, a4);
mtstop(myproc());
mtign();
} else {
// Unknown syscall number: log it and fail with -1.
cprintf("%d %s: unknown sys call %ld\n",
myproc()->pid, myproc()->name, num);
r = -1;
}
// Old retry scheme: handlers returned the E_RETRY sentinel to be re-run.
if (r == E_RETRY)
goto retry;
return r;
// New retry scheme: handlers throw a `retryable` exception (e.g. the
// kernel's std::bad_alloc when kmalloc fails); log, yield the CPU, and
// re-dispatch the same call until it completes or fails for real.
for (;;) {
try {
if(num < SYS_ncount && syscalls[num]) {
mtstart(syscalls[num], myproc());
mtrec();
u64 r = syscalls[num](a0, a1, a2, a3, a4);
mtstop(myproc());
mtign();
return r;
} else {
cprintf("%d %s: unknown sys call %ld\n",
myproc()->pid, myproc()->name, num);
return -1;
}
} catch (retryable& e) {
cprintf("%d: syscall retry\n", myproc()->pid);
yield();
}
}
}
......@@ -112,16 +112,13 @@ vmnode::copy()
vmnode *c = new vmnode(npages, type,
(type==ONDEMAND) ? idup(ip) : 0,
offset, sz);
if(c == 0)
return 0;
if (empty)
return c;
if (c->allocall(false) < 0) {
cprintf("vmn_copy: out of memory\n");
delete c;
return 0;
throw std::bad_alloc();
}
for(u64 i = 0; i < npages; i++)
if (page[i])
......@@ -280,8 +277,6 @@ vmap*
vmap::copy(int share)
{
vmap *nm = new vmap();
if(nm == 0)
return 0;
#if VM_CRANGE
for (auto r: cr) {
......@@ -318,17 +313,10 @@ vmap::copy(int share)
});
}
} else {
// XXX free vmnode copy if vma alloc fails
ne = new vma(nm, e->vma_start, e->vma_end, PRIVATE, e->n->copy());
}
if (ne == 0)
goto err;
if (ne->n == 0) {
delete ne;
goto err;
}
#if VM_CRANGE
auto span = nm->cr.search_lock(ne->vma_start, ne->vma_end - ne->vma_start);
#endif
......@@ -362,12 +350,7 @@ vmap::copy(int share)
}
nm->brk_ = brk_;
return nm;
err:
delete nm;
return 0;
}
// Does any vma overlap start..start+len?
......@@ -549,10 +532,6 @@ vmap::pagefault_wcow(vma *m)
// because other processes may change ref count while this process
// is handling wcow.
struct vmnode *nodecopy = m->n->copy();
if (nodecopy == 0) {
cprintf("pagefault_wcow: out of mem\n");
return -1;
}
vma *repl = new vma(this, m->vma_start, m->vma_end, PRIVATE, nodecopy);
......@@ -663,7 +642,13 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
mtwriteavar("page:%p.%016x", vmap, PGROUNDDOWN(va));
#endif
return vmap->pagefault(va, err);
for (;;) {
try {
return vmap->pagefault(va, err);
} catch (retryable& e) {
cprintf("%d: pagefault retry\n", myproc()->pid);
}
}
}
// Copy len bytes from p to user address va in vmap.
......
您已将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册或登录后发表评论