Split apic IDs from the sequential CPU ids

Parent commit: 4339ad7c
...@@ -10,7 +10,8 @@ extern atomic<u64> tlbflush_req; ...@@ -10,7 +10,8 @@ extern atomic<u64> tlbflush_req;
// Per-CPU state // Per-CPU state
struct cpu { struct cpu {
u8 id; // Local APIC ID; index into cpus[] below cpuid_t id; // Index into cpus[] below
hwid_t hwid; // Local APIC ID
int ncli; // Depth of pushcli nesting. int ncli; // Depth of pushcli nesting.
int intena; // Were interrupts enabled before pushcli? int intena; // Were interrupts enabled before pushcli?
struct segdesc gdt[NSEGS]; // x86 global descriptor table struct segdesc gdt[NSEGS]; // x86 global descriptor table
...@@ -56,3 +57,9 @@ mykmem(void) ...@@ -56,3 +57,9 @@ mykmem(void)
__asm volatile("movq %%gs:16, %0" : "=r" (val)); __asm volatile("movq %%gs:16, %0" : "=r" (val));
return (struct kmem *)val; return (struct kmem *)val;
} }
// Return the sequential CPU id of the executing CPU — an index into
// cpus[] — as introduced by this commit's split of sequential ids from
// hardware LAPIC ids (struct cpu's `id` vs. `hwid` fields).
// NOTE(review): presumably only stable while the caller cannot migrate
// (interrupts disabled or pinned) — confirm against mycpu()'s contract.
static inline cpuid_t
myid(void)
{
  return mycpu()->id;
}
...@@ -139,7 +139,7 @@ void kmemprint(void); ...@@ -139,7 +139,7 @@ void kmemprint(void);
void kbdintr(void); void kbdintr(void);
// lapic.c // lapic.c
int cpunum(void); hwid_t lapicid(void);
void lapicstartap(u8, u32 addr); void lapicstartap(u8, u32 addr);
void lapiceoi(void); void lapiceoi(void);
void lapic_tlbflush(u32); void lapic_tlbflush(u32);
......
...@@ -16,6 +16,9 @@ typedef uptr paddr; ...@@ -16,6 +16,9 @@ typedef uptr paddr;
// Page Map Entry (refers to any entry in any level) // Page Map Entry (refers to any entry in any level)
typedef u64 pme_t; typedef u64 pme_t;
typedef u8 cpuid_t;
typedef u8 hwid_t;
#ifdef XV6 #ifdef XV6
// POSIX types // POSIX types
typedef s64 ssize_t; typedef s64 ssize_t;
......
...@@ -210,15 +210,21 @@ void ...@@ -210,15 +210,21 @@ void
inittls(void) inittls(void)
{ {
struct cpu *c; struct cpu *c;
cpuid_t id = -1;
for (id = 0; id < NCPU; id++)
if (cpus[id].hwid == lapicid())
break;
assert(id != -1);
// Initialize cpu-local storage. // Initialize cpu-local storage.
c = &cpus[cpunum()]; c = &cpus[id];
writegs(KDSEG); writegs(KDSEG);
writemsr(MSR_GS_BASE, (u64)&c->cpu); writemsr(MSR_GS_BASE, (u64)&c->cpu);
writemsr(MSR_GS_KERNBASE, (u64)&c->cpu); writemsr(MSR_GS_KERNBASE, (u64)&c->cpu);
c->cpu = c; c->cpu = c;
c->proc = nullptr; c->proc = nullptr;
c->kmem = &kmems[cpunum()]; c->kmem = &kmems[id];
} }
atomic<u64> tlbflush_req; atomic<u64> tlbflush_req;
......
...@@ -139,12 +139,12 @@ initidle(void) ...@@ -139,12 +139,12 @@ initidle(void)
if (!p) if (!p)
panic("initidle proc::alloc"); panic("initidle proc::alloc");
SLIST_INIT(&idlem[cpunum()].zombies); SLIST_INIT(&idlem[myid()].zombies);
initlock(&idlem[cpunum()].lock, "idle_lock", LOCKSTAT_IDLE); initlock(&idlem[myid()].lock, "idle_lock", LOCKSTAT_IDLE);
snprintf(p->name, sizeof(p->name), "idle_%u", cpunum()); snprintf(p->name, sizeof(p->name), "idle_%u", myid());
mycpu()->proc = p; mycpu()->proc = p;
myproc()->cpuid = cpunum(); myproc()->cpuid = myid();
myproc()->cpu_pin = 1; myproc()->cpu_pin = 1;
idlem->cur = p; idlem->cur = p;
} }
...@@ -137,8 +137,8 @@ lapicpc(char mask) ...@@ -137,8 +137,8 @@ lapicpc(char mask)
lapicw(PCINT, mask ? MASKED : MT_NMI); lapicw(PCINT, mask ? MASKED : MT_NMI);
} }
int hwid_t
cpunum(void) lapicid(void)
{ {
// Cannot call cpu when interrupts are enabled: // Cannot call cpu when interrupts are enabled:
// result not guaranteed to last long enough to be used! // result not guaranteed to last long enough to be used!
......
...@@ -43,8 +43,8 @@ static volatile int bstate; ...@@ -43,8 +43,8 @@ static volatile int bstate;
void void
mpboot(void) mpboot(void)
{ {
initseg();
inittls(); inittls();
initseg();
initlapic(); initlapic();
initsamp(); initsamp();
initidle(); initidle();
...@@ -70,7 +70,7 @@ bootothers(void) ...@@ -70,7 +70,7 @@ bootothers(void)
memmove(code, _bootother_start, _bootother_size); memmove(code, _bootother_start, _bootother_size);
for(c = cpus; c < cpus+ncpu; c++){ for(c = cpus; c < cpus+ncpu; c++){
if(c == cpus+cpunum()) // We've started already. if(c == cpus+myid()) // We've started already.
continue; continue;
// Tell bootother.S what stack to use and the address of apstart; // Tell bootother.S what stack to use and the address of apstart;
......
...@@ -115,11 +115,12 @@ initmp(void) ...@@ -115,11 +115,12 @@ initmp(void)
panic("initmp: too many CPUs"); panic("initmp: too many CPUs");
if(ncpu != proc->apicid){ if(ncpu != proc->apicid){
cprintf("mpinit: ncpu=%d apicid=%d\n", ncpu, proc->apicid); cprintf("mpinit: ncpu=%d apicid=%d\n", ncpu, proc->apicid);
ismp = 0; //ismp = 0;
} }
if(proc->flags & MPBOOT) if(proc->flags & MPBOOT)
bcpu = &cpus[ncpu]; bcpu = &cpus[ncpu];
cpus[ncpu].id = ncpu; cpus[ncpu].id = ncpu;
cpus[ncpu].hwid = proc->apicid;
ncpu++; ncpu++;
p += sizeof(struct mpproc); p += sizeof(struct mpproc);
continue; continue;
......
...@@ -244,7 +244,7 @@ sampwrite(struct inode *ip, char *buf, u32 off, u32 n) ...@@ -244,7 +244,7 @@ sampwrite(struct inode *ip, char *buf, u32 off, u32 n)
void void
initsamp(void) initsamp(void)
{ {
if (cpunum() == mpbcpu()) { if (myid() == mpbcpu()) {
u32 name[4]; u32 name[4];
char *s = (char *)name; char *s = (char *)name;
name[3] = 0; name[3] = 0;
...@@ -267,8 +267,8 @@ initsamp(void) ...@@ -267,8 +267,8 @@ initsamp(void)
void *p = ksalloc(slab_perf); void *p = ksalloc(slab_perf);
if (p == nullptr) if (p == nullptr)
panic("initprof: ksalloc"); panic("initprof: ksalloc");
pmulog[cpunum()].event = (pmuevent*) p; pmulog[myid()].event = (pmuevent*) p;
pmulog[cpunum()].capacity = PERFSIZE / sizeof(struct pmuevent); pmulog[myid()].capacity = PERFSIZE / sizeof(struct pmuevent);
devsw[SAMPLER].write = sampwrite; devsw[SAMPLER].write = sampwrite;
devsw[SAMPLER].read = sampread; devsw[SAMPLER].read = sampread;
......
...@@ -21,7 +21,7 @@ u64 ...@@ -21,7 +21,7 @@ u64
sysentry_c(u64 a0, u64 a1, u64 a2, u64 a3, u64 a4, u64 num) sysentry_c(u64 a0, u64 a1, u64 a2, u64 a3, u64 a4, u64 num)
{ {
writegs(KDSEG); writegs(KDSEG);
writemsr(MSR_GS_BASE, (u64)&cpus[cpunum()].cpu); writemsr(MSR_GS_BASE, (u64)&cpus[lapicid()].cpu);
sti(); sti();
...@@ -46,7 +46,7 @@ void ...@@ -46,7 +46,7 @@ void
trap(struct trapframe *tf) trap(struct trapframe *tf)
{ {
writegs(KDSEG); writegs(KDSEG);
writemsr(MSR_GS_BASE, (u64)&cpus[cpunum()].cpu); writemsr(MSR_GS_BASE, (u64)&cpus[lapicid()].cpu);
if (tf->trapno == T_NMI) { if (tf->trapno == T_NMI) {
// The only locks that we can acquire during NMI are ones // The only locks that we can acquire during NMI are ones
...@@ -230,7 +230,7 @@ initseg(void) ...@@ -230,7 +230,7 @@ initseg(void)
lidt((void *)&dtr.limit); lidt((void *)&dtr.limit);
// TLS might not be ready // TLS might not be ready
c = &cpus[cpunum()]; c = &cpus[myid()];
// Load per-CPU GDT // Load per-CPU GDT
memmove(c->gdt, bootgdt, sizeof(bootgdt)); memmove(c->gdt, bootgdt, sizeof(bootgdt));
dtr.limit = sizeof(c->gdt) - 1; dtr.limit = sizeof(c->gdt) - 1;
......
You added 0 people to this discussion. Proceed with caution.
Finish editing this comment first!
Register or sign in to post a comment