Split APIC IDs from the sequential CPU IDs

Parent 4339ad7c
@@ -10,7 +10,8 @@ extern atomic<u64> tlbflush_req;
 // Per-CPU state
 struct cpu {
-  u8 id;                       // Local APIC ID; index into cpus[] below
+  cpuid_t id;                  // Index into cpus[] below
+  hwid_t hwid;                 // Local APIC ID
   int ncli;                    // Depth of pushcli nesting.
   int intena;                  // Were interrupts enabled before pushcli?
   struct segdesc gdt[NSEGS];   // x86 global descriptor table
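
The reason for the split: a local APIC ID is assigned by hardware and need not be dense or zero-based (multi-socket and hyperthreaded parts commonly stride it), while cpus[] wants a sequential 0..ncpu-1 index, so one field can no longer serve both roles. A minimal sketch of the resulting translation, assuming the cpus[]/ncpu globals from the surrounding code; the helper name is hypothetical, and the commit inlines the same scan directly in inittls() below:

    // Hypothetical helper: map a hardware APIC ID to the dense cpus[] index.
    static cpuid_t
    cpuid_from_hwid(hwid_t hw)
    {
      for (cpuid_t i = 0; i < ncpu; i++)
        if (cpus[i].hwid == hw)
          return i;
      panic("cpuid_from_hwid: no CPU with that APIC ID");
    }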
@@ -56,3 +57,9 @@ mykmem(void)
   __asm volatile("movq %%gs:16, %0" : "=r" (val));
   return (struct kmem *)val;
 }
+
+static inline cpuid_t
+myid(void)
+{
+  return mycpu()->id;
+}
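
myid() is only as valid as the %gs-based TLS it reads through: mycpu() dereferences the per-CPU slot that inittls() installs, so myid() must not run before inittls() has run on that CPU (hence the mpboot() reorder below). A simplified sketch of the accessor it builds on, in the same gs-relative style as mykmem() above; the gs:0 offset is an assumption derived from inittls() pointing MSR_GS_BASE at &c->cpu:

    // Sketch: MSR_GS_BASE points at c->cpu and c->cpu == c,
    // so gs:0 holds the pointer to this CPU's struct cpu.
    static inline struct cpu *
    mycpu(void)
    {
      u64 val;
      __asm volatile("movq %%gs:0, %0" : "=r" (val));
      return (struct cpu *)val;
    }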
@@ -139,7 +139,7 @@ void            kmemprint(void);
 void            kbdintr(void);

 // lapic.c
-int             cpunum(void);
+hwid_t          lapicid(void);
 void            lapicstartap(u8, u32 addr);
 void            lapiceoi(void);
 void            lapic_tlbflush(u32);
@@ -16,6 +16,9 @@ typedef uptr paddr;
 // Page Map Entry (refers to any entry in any level)
 typedef u64 pme_t;

+typedef u8 cpuid_t;
+typedef u8 hwid_t;
+
 #ifdef XV6
 // POSIX types
 typedef s64 ssize_t;
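
Worth noting as a design choice: cpuid_t and hwid_t are both plain u8 aliases, so the compiler will not reject code that mixes the two ID spaces; the separation is enforced by convention and by the function signatures above. A hypothetical example of the kind of mix-up that still compiles:

    // Compiles cleanly, but silently treats a hardware APIC ID as a
    // cpus[] index -- exactly the confusion this commit is untangling.
    cpuid_t i = lapicid();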
@@ -210,15 +210,21 @@ void
 inittls(void)
 {
   struct cpu *c;
+  cpuid_t id;
+
+  for (id = 0; id < NCPU; id++)
+    if (cpus[id].hwid == lapicid())
+      break;
+  assert(id < NCPU);   // must find a slot matching our APIC ID

   // Initialize cpu-local storage.
-  c = &cpus[cpunum()];
+  c = &cpus[id];
   writegs(KDSEG);
   writemsr(MSR_GS_BASE, (u64)&c->cpu);
   writemsr(MSR_GS_KERNBASE, (u64)&c->cpu);
   c->cpu = c;
   c->proc = nullptr;
-  c->kmem = &kmems[cpunum()];
+  c->kmem = &kmems[id];
 }

 atomic<u64> tlbflush_req;
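
One detail in inittls() worth spelling out: both GS-base MSRs receive the same pointer, so a swapgs on the kernel entry or exit path swaps in an identical base and per-CPU accesses stay valid either way. Annotated restatement of the two writes:

    writemsr(MSR_GS_BASE, (u64)&c->cpu);      // base %gs uses right now
    writemsr(MSR_GS_KERNBASE, (u64)&c->cpu);  // base swapgs would swap in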
@@ -139,12 +139,12 @@ initidle(void)
   if (!p)
     panic("initidle proc::alloc");

-  SLIST_INIT(&idlem[cpunum()].zombies);
-  initlock(&idlem[cpunum()].lock, "idle_lock", LOCKSTAT_IDLE);
+  SLIST_INIT(&idlem[myid()].zombies);
+  initlock(&idlem[myid()].lock, "idle_lock", LOCKSTAT_IDLE);

-  snprintf(p->name, sizeof(p->name), "idle_%u", cpunum());
+  snprintf(p->name, sizeof(p->name), "idle_%u", myid());
   mycpu()->proc = p;
-  myproc()->cpuid = cpunum();
+  myproc()->cpuid = myid();
   myproc()->cpu_pin = 1;
   idlem->cur = p;
 }
@@ -137,8 +137,8 @@ lapicpc(char mask)
   lapicw(PCINT, mask ? MASKED : MT_NMI);
 }

-int
-cpunum(void)
+hwid_t
+lapicid(void)
 {
   // Cannot call cpu when interrupts are enabled:
   // result not guaranteed to last long enough to be used!
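
The rename states the contract: the function returns the hardware's name for this CPU, not a cpus[] index, and the warning comment survives because the value can go stale as soon as the thread migrates. A sketch of the likely body, assuming the xv6 convention of reading the top byte of the memory-mapped local APIC ID register:

    hwid_t
    lapicid(void)
    {
      // Local APIC ID register; the ID sits in bits 31:24.
      return lapic[ID] >> 24;
    }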
@@ -43,8 +43,8 @@ static volatile int bstate;
 void
 mpboot(void)
 {
-  initseg();
   inittls();
+  initseg();
   initlapic();
   initsamp();
   initidle();
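
The swap is load-bearing: initseg() now locates its cpu struct with myid() (final hunk below), and myid() reads through the %gs TLS that only inittls() establishes, so on an AP the TLS setup must come first. (The "TLS might not be ready" comment that survives in initseg() predates this reorder.) Annotated restatement of the resulting bring-up order:

    void
    mpboot(void)
    {
      inittls();    // install %gs base; mycpu()/myid() work after this
      initseg();    // safe now: indexes cpus[] via myid()
      initlapic();
      initsamp();   // per-CPU sampler state, keyed by myid()
      initidle();   // per-CPU idle process, keyed by myid()
    }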
@@ -70,7 +70,7 @@ bootothers(void)
   memmove(code, _bootother_start, _bootother_size);

   for(c = cpus; c < cpus+ncpu; c++){
-    if(c == cpus+cpunum())  // We've started already.
+    if(c == cpus+myid())  // We've started already.
       continue;

     // Tell bootother.S what stack to use and the address of apstart;
@@ -115,11 +115,12 @@ initmp(void)
        panic("initmp: too many CPUs");
      if(ncpu != proc->apicid){
        cprintf("mpinit: ncpu=%d apicid=%d\n", ncpu, proc->apicid);
-       ismp = 0;
+       //ismp = 0;
      }
      if(proc->flags & MPBOOT)
        bcpu = &cpus[ncpu];
      cpus[ncpu].id = ncpu;
+     cpus[ncpu].hwid = proc->apicid;
      ncpu++;
      p += sizeof(struct mpproc);
      continue;
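
This hunk is what lets non-sequential APIC IDs boot at all: a mismatch between the running count and the reported APIC ID used to knock the kernel back to uniprocessor mode (ismp = 0), and now it is merely logged while the hardware ID is recorded in its own field. Illustrative result on a hypothetical four-CPU box whose parts report APIC IDs 0, 2, 4, 6:

    // Hypothetical cpus[] contents after the MP-table walk:
    //   cpus[0] = { .id = 0, .hwid = 0 }
    //   cpus[1] = { .id = 1, .hwid = 2 }
    //   cpus[2] = { .id = 2, .hwid = 4 }
    //   cpus[3] = { .id = 3, .hwid = 6 }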
@@ -244,7 +244,7 @@ sampwrite(struct inode *ip, char *buf, u32 off, u32 n)
 void
 initsamp(void)
 {
-  if (cpunum() == mpbcpu()) {
+  if (myid() == mpbcpu()) {
     u32 name[4];
     char *s = (char *)name;
     name[3] = 0;
@@ -267,8 +267,8 @@ initsamp(void)
   void *p = ksalloc(slab_perf);
   if (p == nullptr)
     panic("initprof: ksalloc");
-  pmulog[cpunum()].event = (pmuevent*) p;
-  pmulog[cpunum()].capacity = PERFSIZE / sizeof(struct pmuevent);
+  pmulog[myid()].event = (pmuevent*) p;
+  pmulog[myid()].capacity = PERFSIZE / sizeof(struct pmuevent);

   devsw[SAMPLER].write = sampwrite;
   devsw[SAMPLER].read = sampread;
@@ -21,7 +21,7 @@ u64
 sysentry_c(u64 a0, u64 a1, u64 a2, u64 a3, u64 a4, u64 num)
 {
   writegs(KDSEG);
-  writemsr(MSR_GS_BASE, (u64)&cpus[cpunum()].cpu);
+  writemsr(MSR_GS_BASE, (u64)&cpus[lapicid()].cpu);

   sti();
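
This entry path (and trap() in the next hunk) runs before the kernel %gs base is re-established, so it cannot use myid() and falls back to the raw lapicid(). That leaves a subtlety: cpus[lapicid()] only names the right slot while APIC IDs happen to coincide with cpus[] indices; on hardware where they diverge, these sites would need the same hwid-to-index scan that inittls() performs. Condensed shape of the preamble, names as in the hunk above:

    writegs(KDSEG);                                    // kernel data segment
    writemsr(MSR_GS_BASE, (u64)&cpus[lapicid()].cpu);  // rebuild per-CPU base
    sti();                                             // interrupts back on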
@@ -46,7 +46,7 @@ void
 trap(struct trapframe *tf)
 {
   writegs(KDSEG);
-  writemsr(MSR_GS_BASE, (u64)&cpus[cpunum()].cpu);
+  writemsr(MSR_GS_BASE, (u64)&cpus[lapicid()].cpu);

   if (tf->trapno == T_NMI) {
     // The only locks that we can acquire during NMI are ones
@@ -230,7 +230,7 @@ initseg(void)
   lidt((void *)&dtr.limit);

   // TLS might not be ready
-  c = &cpus[cpunum()];
+  c = &cpus[myid()];
   // Load per-CPU GDT
   memmove(c->gdt, bootgdt, sizeof(bootgdt));
   dtr.limit = sizeof(c->gdt) - 1;