Commit d0b095e0 authored by Austin Clements

percpu safety policies and scoped pushcli/popcli around percpu access

These safety policies help ensure that a thread does not migrate between CPUs while it is accessing a percpu variable. The "cli" safety policy allows percpu variables to be accessed only while interrupts are disabled. The "internal" safety policy leaves it up to the caller. Not surprisingly, most current percpu variables use "internal".
Parent b0f6dd3b
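For illustration only, not part of the commit: a sketch of how a variable's safety policy is chosen at its declaration, assuming the percpu API introduced in the diff below (header name assumed). The "stats" type, its "add" method, and "nticks" are made up for this example.

    // Hypothetical example, not from this commit.
    #include <atomic>
    #include "percpu.hh"   // assumed location of the new percpu API

    struct stats {                   // made-up type; internally thread-safe
      std::atomic<u64> sum;
      void add(u64 v) { sum += v; }
    };

    // cli policy: accessors must run with interrupts disabled.
    static percpu<u64, percpu_safety::cli> nticks;
    // internal policy: the type synchronizes itself; percpu is only sharding.
    static percpu<stats, percpu_safety::internal> stats_;

    void record(u64 v)
    {
      // Under "internal", operator-> performs no interrupt check; migrating
      // CPUs after myid() is read is tolerated because stats is thread-safe
      // on its own.
      stats_->add(v);
    }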
@@ -71,4 +71,4 @@ enum {
   slab_type_max
 };
 
-extern percpu<kmem> kmems;
+extern percpu<kmem, percpu_safety::internal> kmems;
 #pragma once
 #include "cpu.hh"
+#include "amd64.h"
+#include "bits.hh"
 
-template <typename T>
+extern void pushcli(void);
+extern void popcli(void);
+
+// Safety policy for how to protect against CPU migrations while using
+// a per-CPU variable.
+enum class percpu_safety {
+  // Interrupts are disabled so the thread cannot migrate. This can
+  // be done in the calling code, or using the load method to get a
+  // scoped cli.
+  cli,
+  // No protection against migration is required. The variables are
+  // internally thread-safe. Generally the per-CPU variable is used
+  // only as a sharding mechanism.
+  internal,
+};
+
+template <typename T, percpu_safety S>
+struct percpu;
+
+template<typename T>
+struct percpuval {
+  ~percpuval() {
+    if (cli_)
+      popcli();
+  }
+
+  percpuval(percpuval &&o)
+    : val_(o.val_), cli_(o.cli_) {
+    o.val_ = nullptr;
+    o.cli_ = false;
+  }
+
+  percpuval(const percpuval &o) = delete;
+  percpuval &operator=(const percpuval &o) = delete;
+
+  T* get() {
+    return val_;
+  }
+
+  T* operator->() {
+    return val_;
+  }
+
+  T& operator*() {
+    return *val_;
+  }
+
+  void release() {
+    if (cli_)
+      popcli();
+    val_ = nullptr;
+    cli_ = false;
+  }
+
+private:
+  constexpr percpuval(T* val, bool cli) : val_(val), cli_(cli) { }
+
+  template<typename, percpu_safety> friend struct percpu;
+
+  T* val_;
+  bool cli_;
+};
+
+template <typename T, percpu_safety S = percpu_safety::cli>
 struct percpu {
-  percpu() = default;
+  constexpr percpu() = default;
   percpu(const percpu &o) = delete;
   percpu(percpu &&o) = delete;
   percpu &operator=(const percpu &o) = delete;
 
-  T* get() const {
+  // Return an object that wraps this CPU's value and protects against
+  // CPU migration using the safety policy as long as it is in scope.
+  percpuval<T> load() const {
+    if (S == percpu_safety::cli)
+      pushcli();
+    return percpuval<T>(cpu(myid()), S == percpu_safety::cli);
+  }
+
+  // Ignore the safety policy and return the value of this variable
+  // for the CPU that is current at some instant between entering and
+  // returning from this method.
+  T* get_unchecked() const {
     return cpu(myid());
   }
 
   T* operator->() const {
+    if (S == percpu_safety::cli)
+      assert(!(readrflags() & FL_IF));
     return cpu(myid());
   }
 
   T& operator*() const {
+    if (S == percpu_safety::cli)
+      assert(!(readrflags() & FL_IF));
     return *cpu(myid());
   }
...
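An aside on the checked accessors above, again an illustrative sketch rather than part of the commit: for a cli-policy variable, operator-> and operator* assert that interrupts are already off (FL_IF clear), so direct access must be bracketed by pushcli/popcli by hand, whereas load() does the bracketing itself for the lifetime of the returned percpuval. The "counters" variable is made up.

    // Hypothetical example, not from this commit.
    static percpu<u64, percpu_safety::cli> counters;  // default policy is cli

    void by_hand()
    {
      pushcli();       // interrupts off, so the assert in operator* passes
      (*counters)++;
      popcli();
    }

    void scoped()
    {
      auto c = counters.load();   // pushcli() held while c is in scope
      (*c)++;
    }                             // ~percpuval() runs popcli()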
@@ -73,8 +73,8 @@ private:
   u64 steal;
 };
 
-  percpu<wqueue> q_;
-  percpu<stat> stat_;
+  percpu<wqueue, percpu_safety::internal> q_;
+  percpu<stat, percpu_safety::internal> stat_;
 
 #if defined(XV6_USER)
   uwq_ipcbuf* ipc_;
...
@@ -63,7 +63,9 @@ struct nscache {
   NEW_DELETE_OPS(nscache);
 };
 
-percpu<nscache> nscache_;
+// XXX(austin) If we used percpu_safety::cli here, would nscache no
+// longer need the lock?
+percpu<nscache, percpu_safety::internal> nscache_;
 
 nscache::nscache(void)
 {
...
@@ -91,6 +91,9 @@ idleloop(void)
   // mtrace_call_set(1, cpu->id);
   mtstart(idleloop, myproc());
 
+  // The scheduler ensures that each idle loop always runs on the same CPU
+  struct idle *myidle = idlem.get_unchecked();
+
   sti();
   for (;;) {
     acquire(&myproc()->lock);
@@ -105,7 +108,7 @@ idleloop(void)
       assert(mycpu()->ncli == 0);
 
       // If we don't have an heir, try to allocate one
-      if (idlem->heir == nullptr) {
+      if (myidle->heir == nullptr) {
         struct proc *p;
         p = proc::alloc();
         if (p == nullptr)
@@ -115,7 +118,7 @@ idleloop(void)
         p->cpu_pin = 1;
         p->context->rip = (u64)idleheir;
         p->cwd = nullptr;
-        idlem->heir = p;
+        myidle->heir = p;
       }
 
       if (uwq_trywork())
@@ -123,7 +126,7 @@ idleloop(void)
      worked = wq_trywork();
 
       // If we are no longer the idle thread, exit
-      if (worked && idlem->cur != myproc())
+      if (worked && myidle->cur != myproc())
         exit();
     } while(worked);
     sti();
...
@@ -16,8 +16,8 @@
 static struct Mbmem mem[128];
 static u64 nmem;
 static u64 membytes;
 
-percpu<kmem> kmems;
-percpu<kmem> slabmem[slab_type_max];
+percpu<kmem, percpu_safety::internal> kmems;
+percpu<kmem, percpu_safety::internal> slabmem[slab_type_max];
 
 extern char end[]; // first address after kernel loaded from ELF file
 char *newend;
@@ -128,6 +128,8 @@ kmem::alloc(const char* name)
       panic("kmem:alloc: aba race %p %p %p\n",
             r, r->next, nxt);
   nfree--;
+  if (!name)
+    name = this->name;
   mtlabel(mtrace_label_block, r, size, name, strlen(name));
   return r;
 }
@@ -169,7 +171,7 @@ kfree_pool(struct kmem *m, char *v)
 }
 
 static void
-kmemprint_pool(const percpu<kmem> &km)
+kmemprint_pool(const percpu<kmem, percpu_safety::internal> &km)
 {
   cprintf("pool %s: [ ", &km[0].name[1]);
   for (u32 i = 0; i < NCPU; i++)
@@ -190,7 +192,7 @@ kmemprint()
 
 static char*
-kalloc_pool(const percpu<kmem> &km, const char *name)
+kalloc_pool(const percpu<kmem, percpu_safety::internal> &km, const char *name)
 {
   struct run *r = 0;
   struct kmem *m;
} }
if (r == 0) { if (r == 0) {
cprintf("kalloc: out of memory in pool %s\n", km->name); cprintf("kalloc: out of memory in pool %s\n", km.get_unchecked()->name);
// kmemprint(); // kmemprint();
return 0; return 0;
} }
@@ -227,7 +229,7 @@ kalloc(const char *name)
 void *
 ksalloc(int slab)
 {
-  return kalloc_pool(slabmem[slab], slabmem[slab]->name);
+  return kalloc_pool(slabmem[slab], nullptr);
 }
 
 void
@@ -320,5 +322,5 @@ kfree(void *v)
 void
 ksfree(int slab, void *v)
 {
-  kfree_pool(slabmem[slab].get(), (char*) v);
+  kfree_pool(&*slabmem[slab], (char*) v);
 }
@@ -20,7 +20,7 @@ struct zallocator {
   void free(void*);
   void tryrefill();
 };
 
-percpu<zallocator> z_;
+percpu<zallocator, percpu_safety::internal> z_;
 
 struct zwork : public work {
   zwork(wframe* frame, zallocator* zer)
...