Export scheduler stats to userspace

Parent: c021139f
...@@ -50,6 +50,7 @@ main(int ac, const char *av[]) ...@@ -50,6 +50,7 @@ main(int ac, const char *av[])
xav = &xav[1]; xav = &xav[1];
} }
sys_stat* s0 = sys_stat::read();
pmc_count::config(pmc_selector[pmci].sel); pmc_count::config(pmc_selector[pmci].sel);
pmc_count pmc0 = pmc_count::read(0); pmc_count pmc0 = pmc_count::read(0);
u64 t0 = rdtsc(); u64 t0 = rdtsc();
...@@ -67,11 +68,15 @@ main(int ac, const char *av[]) ...@@ -67,11 +68,15 @@ main(int ac, const char *av[])
} }
wait(); wait();
sys_stat* s1 = sys_stat::read();
pmc_count pmc1 = pmc_count::read(0); pmc_count pmc1 = pmc_count::read(0);
u64 t1 = rdtsc(); u64 t1 = rdtsc();
sys_stat* s2 = s1->delta(s0);
fprintf(1, "%s cycles\n", valstr(t1-t0)); fprintf(1, "%s cycles\n", valstr(t1-t0));
fprintf(1, "%s %s\n", valstr(pmc1.delta(pmc0).sum()), fprintf(1, "%s %s\n", valstr(pmc1.delta(pmc0).sum()),
pmc_selector[pmci].name); pmc_selector[pmci].name);
fprintf(1, "%lu %lu\n", s2->busy(), s2->idle());
exit(); exit();
} }
...@@ -85,3 +85,4 @@ extern struct devsw devsw[]; ...@@ -85,3 +85,4 @@ extern struct devsw devsw[];
#define NETIF 2 #define NETIF 2
#define SAMPLER 3 #define SAMPLER 3
#define DEVLOCKSTAT 4 #define DEVLOCKSTAT 4
#define DEVSTAT 5
#include "sched.hh"
struct pmc_count { struct pmc_count {
static void config(u64 sel) { static void config(u64 sel) {
...@@ -39,3 +41,61 @@ struct pmc_count { ...@@ -39,3 +41,61 @@ struct pmc_count {
u64 count_[NCPU]; u64 count_[NCPU];
}; };
// Userspace snapshot of the per-CPU scheduler statistics that the
// kernel exports through /dev/stat (see statread() in the kernel).
struct sys_stat {
  // Read one fresh snapshot of all CPUs' sched_stat records from
  // /dev/stat.  Any failure aborts via assert.  Caller owns the
  // returned object (free with delete).
  static sys_stat* read() {
    sys_stat* that = new sys_stat();
    assert(that != nullptr);

    int fd = open("/dev/stat", O_RDONLY);
    assert(fd != -1);

    // The device handler only accepts whole-snapshot reads, so a
    // single read() of exactly sizeof(stats) is required.
    long r = ::read(fd, that->stats, sizeof(that->stats));
    assert(r == sizeof(that->stats));
    close(fd);  // fd was previously leaked on every snapshot
    return that;
  }

  // Return a newly allocated sys_stat holding the per-CPU, per-field
  // difference (this - o).  Caller owns the returned object.
  sys_stat* delta(const sys_stat* o) const {
    sys_stat* that = new sys_stat();
    assert(that != nullptr);
    for (int i = 0; i < NCPU; i++) {
      that->stats[i].enqs = stats[i].enqs - o->stats[i].enqs;
      that->stats[i].deqs = stats[i].deqs - o->stats[i].deqs;
      that->stats[i].steals = stats[i].steals - o->stats[i].steals;
      that->stats[i].misses = stats[i].misses - o->stats[i].misses;
      that->stats[i].idle = stats[i].idle - o->stats[i].idle;
      that->stats[i].busy = stats[i].busy - o->stats[i].busy;
      // schedstart is a raw timestamp, not a counter; zero it so the
      // delta object never exposes uninitialized malloc'd memory.
      that->stats[i].schedstart = 0;
    }
    return that;
  }

  // Total cycles all CPUs spent running non-idle work.
  u64 busy() const {
    u64 tot = 0;
    for (int i = 0; i < NCPU; i++)
      tot += stats[i].busy;
    return tot;
  }

  // Total cycles all CPUs spent in the idle process.
  u64 idle() const {
    u64 tot = 0;
    for (int i = 0; i < NCPU; i++)
      tot += stats[i].idle;
    return tot;
  }

  // This userspace environment has no default heap operator new, so
  // route allocation through malloc/free explicitly.
  static void* operator new(unsigned long nbytes) {
    assert(nbytes == sizeof(sys_stat));
    return malloc(nbytes);
  }

  static void operator delete(void* p) {
    free(p);
  }

  sched_stat stats[NCPU];  // one record per CPU, indexed by cpuid
};
...@@ -4,3 +4,13 @@ struct sched_link ...@@ -4,3 +4,13 @@ struct sched_link
sched_link* next; sched_link* next;
}; };
// Per-CPU scheduler statistics.  Updated by the scheduler without
// locking and exported to userspace as a raw binary array of NCPU
// records via the /dev/stat device (see statread()).
struct sched_stat
{
  u64 enqs;        // runqueue enqueue operations
  u64 deqs;        // runqueue dequeue operations
  u64 steals;      // procs successfully stolen from other CPUs' queues
  u64 misses;      // presumably failed steal attempts — confirm in schedule::steal
  u64 idle;        // cumulative rdtsc cycles spent in the idle proc
  u64 busy;        // cumulative rdtsc cycles spent running real work
  u64 schedstart;  // rdtsc timestamp of the last sched() accounting point
};
...@@ -30,6 +30,8 @@ public: ...@@ -30,6 +30,8 @@ public:
assert(nbytes == sizeof(schedule)); assert(nbytes == sizeof(schedule));
return buf; return buf;
} }
sched_stat stats_;
private: private:
struct spinlock lock_; struct spinlock lock_;
...@@ -37,13 +39,6 @@ private: ...@@ -37,13 +39,6 @@ private:
void sanity(void); void sanity(void);
struct {
std::atomic<u64> enqs;
std::atomic<u64> deqs;
std::atomic<u64> steals;
std::atomic<u64> misses;
} stats_;
volatile u64 ncansteal_ __mpalign__; volatile u64 ncansteal_ __mpalign__;
}; };
percpu<schedule> schedule_; percpu<schedule> schedule_;
...@@ -60,6 +55,9 @@ schedule::schedule(void) ...@@ -60,6 +55,9 @@ schedule::schedule(void)
stats_.deqs = 0; stats_.deqs = 0;
stats_.steals = 0; stats_.steals = 0;
stats_.misses = 0; stats_.misses = 0;
stats_.idle = 0;
stats_.busy = 0;
stats_.schedstart = 0;
} }
void void
...@@ -81,6 +79,10 @@ schedule::enq(proc* p) ...@@ -81,6 +79,10 @@ schedule::enq(proc* p)
proc* proc*
schedule::deq(void) schedule::deq(void)
{ {
if (head_.next == &head_)
return nullptr;
ANON_REGION(__func__, &perfgroup);
// Remove from head // Remove from head
scoped_acquire x(&lock_); scoped_acquire x(&lock_);
sched_link* entry = head_.next; sched_link* entry = head_.next;
...@@ -102,6 +104,7 @@ schedule::steal(bool nonexec) ...@@ -102,6 +104,7 @@ schedule::steal(bool nonexec)
if (ncansteal_ == 0 || !tryacquire(&lock_)) if (ncansteal_ == 0 || !tryacquire(&lock_))
return nullptr; return nullptr;
ANON_REGION(__func__, &perfgroup);
for (sched_link* ptr = head_.next; ptr != &head_; ptr = ptr->next) for (sched_link* ptr = head_.next; ptr != &head_; ptr = ptr->next)
if (cansteal((proc*)ptr, nonexec)) { if (cansteal((proc*)ptr, nonexec)) {
ptr->next->prev = ptr->prev; ptr->next->prev = ptr->prev;
...@@ -121,10 +124,10 @@ void ...@@ -121,10 +124,10 @@ void
schedule::dump(void) schedule::dump(void)
{ {
cprintf("%8lu %8lu %8lu %8lu\n", cprintf("%8lu %8lu %8lu %8lu\n",
stats_.enqs.load(), stats_.enqs,
stats_.deqs.load(), stats_.deqs,
stats_.steals.load(), stats_.steals,
stats_.misses.load()); stats_.misses);
stats_.enqs = 0; stats_.enqs = 0;
stats_.deqs = 0; stats_.deqs = 0;
...@@ -214,6 +217,7 @@ scheddump(void) ...@@ -214,6 +217,7 @@ scheddump(void)
void void
addrun(struct proc* p) addrun(struct proc* p)
{ {
ANON_REGION(__func__, &perfgroup);
p->set_state(RUNNABLE); p->set_state(RUNNABLE);
schedule_[p->cpuid].enq(p); schedule_[p->cpuid].enq(p);
} }
...@@ -250,6 +254,13 @@ sched(void) ...@@ -250,6 +254,13 @@ sched(void)
// Interrupts are disabled // Interrupts are disabled
next = schedule_->deq(); next = schedule_->deq();
u64 t = rdtsc();
if (myproc() == idleproc())
schedule_->stats_.idle += t - schedule_->stats_.schedstart;
else
schedule_->stats_.busy += t - schedule_->stats_.schedstart;
schedule_->stats_.schedstart = t;
if (next == nullptr) { if (next == nullptr) {
if (myproc()->get_state() != RUNNABLE || if (myproc()->get_state() != RUNNABLE ||
// proc changed its CPU pin? // proc changed its CPU pin?
...@@ -293,9 +304,29 @@ sched(void) ...@@ -293,9 +304,29 @@ sched(void)
post_swtch(); post_swtch();
} }
// Device read handler for /dev/stat (DEVSTAT).  Copies every CPU's
// sched_stat record into dst as one binary blob — sort of like a
// binary /proc/stat.  Returns n on success, -1 on a malformed read.
static int
statread(struct inode *inode, char *dst, u32 off, u32 n)
{
  size_t sz = NCPU*sizeof(sched_stat);
  // Only a single whole-snapshot read from offset 0 is supported.
  // Previously off was ignored, so a sequential reader at off != 0
  // would silently receive the blob from offset 0 again.
  if (off != 0 || n != sz)
    return -1;
  for (int i = 0; i < NCPU; i++) {
    // NOTE(review): each CPU updates its stats_ without locks, so this
    // copy may observe counters from slightly different instants.
    memcpy(&dst[i*sizeof(sched_stat)], &schedule_[i].stats_,
           sizeof(schedule_[i].stats_));
  }
  return n;
}
void void
initsched(void) initsched(void)
{ {
for (int i = 0; i < NCPU; i++) for (int i = 0; i < NCPU; i++)
new (&schedule_[i]) schedule(); new (&schedule_[i]) schedule();
devsw[DEVSTAT].write = nullptr;
devsw[DEVSTAT].read = statread;
} }
You are adding 0 people to this discussion. Please proceed with caution.
Please finish editing this comment first!
Register or sign in to post a comment.