Commit 6f4a90b3 authored by Silas Boyd-Wickizer

Bungle TLS on x86-64.

Parent 84878e55
......@@ -38,3 +38,7 @@
#define CR0_NW 0x20000000 // Not Writethrough
#define CR0_CD 0x40000000 // Cache Disable
#define CR0_PG 0x80000000 // Paging
// FS/GS base registers
#define MSR_FS_BASE 0xc0000100
#define MSR_GS_BASE 0xc0000101
#include "mmu.h"
// Per-CPU state
struct cpu {
u8 id; // Local APIC ID; index into cpus[] below
int ncli; // Depth of pushcli nesting.
int intena; // Were interrupts enabled before pushcli?
struct segdesc gdt[NSEGS]; // x86 global descriptor table
// Cpu-local storage variables; see below
struct cpu *cpu;
struct proc *proc; // The currently-running process.
struct kmem *kmem; // The per-core memory table
#if 0
struct context *scheduler; // swtch() here to enter scheduler
struct taskstate ts; // Used by x86 to find stack for interrupt
struct segdesc gdt[NSEGS]; // x86 global descriptor table
volatile uint booted; // Has the CPU started?
int last_rcu_gc_ticks;
// Cpu-local storage variables; see below
struct cpu *cpu;
struct proc *proc; // The currently-running process.
struct kmem *kmem; // The per-core memory table
#endif
} __mpalign__;
extern struct cpu cpus[NCPU];
// Per-CPU variables, holding pointers to the
// current cpu and to the current process.
// The asm suffix tells gcc to use "%gs:0" to refer to cpu,
// "%gs:8" to refer to proc, and "%gs:16" to refer to kmem.
// initseg sets up the %gs base so that %gs refers to the memory
// holding these variables in the local cpu's struct cpu.
// This is similar to how thread-local variables are implemented
// in thread libraries such as Linux pthreads.
extern struct cpu *cpu __asm("%gs:0"); // &cpus[cpunum()].cpu
extern struct proc *proc __asm("%gs:8"); // cpus[cpunum()].proc
extern struct kmem *kmem __asm("%gs:16"); // &cpus[cpunum()].kmem
// XXX(sbw) asm labels default to RIP-relative and
// I don't know how to force absolute addressing.
static inline struct cpu *
mycpu(void)
{
u64 val;
__asm volatile("movq %%gs:0, %0" : "=r" (val));
return (struct cpu *)val;
}
static inline struct proc *
myproc(void)
{
u64 val;
__asm volatile("movq %%gs:8, %0" : "=r" (val));
return (struct proc *)val;
}
static inline struct kmem *
mykmem(void)
{
u64 val;
__asm volatile("movq %%gs:16, %0" : "=r" (val));
return (struct kmem *)val;
}
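
The rest of this commit converts callers from the old cpu/proc globals to these accessors. A minimal hedged sketch of the intended usage pattern, assuming cprintf, pushcli, and popcli are declared elsewhere in this tree (e.g. in kernel.h); show_current is a hypothetical helper, not part of the commit:

static void
show_current(void)
{
  // Per-CPU pointers are only stable while we cannot migrate or be
  // interrupted, so bracket the accesses with pushcli/popcli.
  pushcli();
  struct cpu *c = mycpu();     // &cpus[cpunum()], loaded from %gs:0
  struct proc *p = myproc();   // currently-running process, from %gs:8
  cprintf("cpu %d running proc %p\n", c->id, p);
  popcli();
}
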
......@@ -6,9 +6,13 @@
#include "param.h"
#include "mmu.h"
#include "kernel.h"
#include "spinlock.h"
#include "kalloc.h"
void kminit(void);
struct kmem kmems[NCPU];
extern char end[]; // first address after kernel loaded from ELF file
char *newend;
enum { kalloc_memset = 0 };
......
......@@ -6,7 +6,7 @@ struct kmem {
char name[MAXNAME];
struct spinlock lock;
struct run *freelist;
uint nfree;
u64 nfree;
} __attribute__ ((aligned (CACHELINE)));
extern struct kmem kmems[NCPU];
......@@ -42,3 +42,6 @@ void uartputc(char c);
// mp.c
extern int ncpu;
// lapic.c
int cpunum(void);
......@@ -9,6 +9,7 @@ extern void initmp(void);
extern void initlapic(void);
extern void inittrap(void);
extern void initpg(void);
extern void initseg(void);
void
cmain(void)
......@@ -19,6 +20,7 @@ cmain(void)
initpg();
initmp();
initlapic();
initseg();
#if 0
inittrap();
#endif
......
#pragma once
#define PGSIZE 4096
#define PGSHIFT 12 // log2(PGSIZE)
......@@ -35,3 +37,29 @@
// Address in page table or page directory entry
#define PTE_ADDR(pte) ((uptr)(pte) & ~0xFFF)
struct segdesc {
int lim_15_0 : 16; // Low bits of segment limit
int base_15_0 : 16; // Low bits of segment base address
int base_23_16 : 8; // Middle bits of segment base address
int type : 4; // Segment type (see STS_ constants)
int s : 1; // 0 = system, 1 = application
int dpl : 2; // Descriptor Privilege Level
int p : 1; // Present
int lim_19_16 : 4; // High bits of segment limit
int avl : 1; // Unused (available for software use)
int rsv1 : 1; // Reserved
int db : 1; // 0 = 16-bit segment, 1 = 32-bit segment
int g : 1; // Granularity: limit scaled by 4K when set
int base_31_24 : 8; // High bits of segment base address
};
// Segment selectors (indexes) in our GDTs.
// Defined by our convention, not the architecture.
#define KCSEG32 (1<<3) /* kernel 32-bit code segment */
#define KCSEG (2<<3) /* kernel code segment */
#define KDSEG (3<<3) /* kernel data segment */
#define TSSSEG (4<<3) /* tss segment - takes two slots */
#define UCSEG (6<<3) /* user code segment */
#define UDSEG (7<<3) /* user data segment */
#define NSEGS 8
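
These bitfields follow the legacy 32-bit descriptor layout bit for bit. As an illustration only, a hypothetical builder (not part of this commit, assuming the u32 typedef from types.h) shows how the fields would be filled for a flat 0-4 GB segment with 4 KB granularity:

// Hypothetical helper, for illustration: build a flat descriptor.
// 'type' holds the access-type bits (executable/readable code or
// writable data); 'dpl' is the descriptor privilege level.
static inline struct segdesc
flatseg(u32 type, u32 base, u32 lim, u32 dpl)
{
  struct segdesc d = {
    .lim_15_0   = (lim >> 12) & 0xffff, // limit counted in 4K pages (g=1)
    .base_15_0  = base & 0xffff,
    .base_23_16 = (base >> 16) & 0xff,
    .type       = type,
    .s          = 1,                    // application segment
    .dpl        = dpl,
    .p          = 1,                    // present
    .lim_19_16  = lim >> 28,
    .avl        = 0,
    .rsv1       = 0,
    .db         = 1,                    // 32-bit default operand size
    .g          = 1,                    // limit scaled by 4K
    .base_31_24 = base >> 24,
  };
  return d;
}
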
......@@ -61,7 +61,7 @@ acquire(struct spinlock *lk)
#if SPINLOCK_DEBUG
// Record info about lock acquisition for debugging.
lk->cpu = cpu;
lk->cpu = mycpu();
getcallerpcs(&lk, lk->pcs);
#endif
}
......@@ -129,7 +129,7 @@ getcallerpcs(void *v, uptr pcs[])
int
holding(struct spinlock *lock)
{
return lock->locked && lock->cpu == cpu;
return lock->locked && lock->cpu == mycpu();
}
#endif
......@@ -145,8 +145,8 @@ pushcli(void)
rflags = readrflags();
cli();
if(cpu->ncli++ == 0)
cpu->intena = rflags & FL_IF;
if(mycpu()->ncli++ == 0)
mycpu()->intena = rflags & FL_IF;
}
void
......@@ -154,8 +154,8 @@ popcli(void)
{
if(readrflags()&FL_IF)
panic("popcli - interruptible");
if(--cpu->ncli < 0)
if(--mycpu()->ncli < 0)
panic("popcli");
if(cpu->ncli == 0 && cpu->intena)
if(mycpu()->ncli == 0 && mycpu()->intena)
sti();
}
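
pushcli and popcli nest: only the outermost pushcli records whether interrupts were enabled, and only the outermost popcli restores them. A hedged sketch of the idiom (percpu_depth is a hypothetical helper, not part of the commit):

static int
percpu_depth(void)
{
  int depth;
  pushcli();               // outermost pushcli records FL_IF in intena
  pushcli();               // nested pushcli just bumps ncli
  depth = mycpu()->ncli;   // safe: cannot migrate or be interrupted here
  popcli();                // inner popcli leaves interrupts off
  popcli();                // outermost popcli does sti() if intena was set
  return depth;
}
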
......@@ -2,8 +2,12 @@
#include "types.h"
#include "x86.h"
#include "mmu.h"
#include "cpu.h"
#include "kernel.h"
#include "memlayout.h"
#include "bits.h"
#include "spinlock.h"
#include "kalloc.h"
extern char data[]; // defined in data.S
......@@ -11,35 +15,6 @@ extern pml4e_t kpml4[];
extern char* pgalloc(void);
#if 0
// page map for during boot
// XXX build a static page table in assembly
static void
pgmap(void *va, void *last, uint pa)
{
pde_t *pde;
pte_t *pgtab;
pte_t *pte;
for(;;){
pde = &kpgdir[PDX(va)];
pde_t pdev = *pde;
if (pdev == 0) {
pgtab = (pte_t *) pgalloc();
*pde = v2p(pgtab) | PTE_P | PTE_W;
} else {
pgtab = (pte_t*)p2v(PTE_ADDR(pdev));
}
pte = &pgtab[PTX(va)];
*pte = pa | PTE_W | PTE_P;
if(va == last)
break;
va += PGSIZE;
pa += PGSIZE;
}
}
#endif
static void
pgmap(void *va, void *last, paddr pa)
{
......@@ -94,39 +69,26 @@ initpg(char* (*alloc)(void))
{
pgmap((void *) 0, (void *) PHYSTOP, 0);
pgmap((void *) PBASE, (void *) (PBASE+(1UL<<32)), 0);
//switchkvm(); // load kpgdir into cr3
// boot.S gets us running with kpml4
}
#if 0
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
seginit(void)
initseg(void)
{
struct cpu *c;
// Map virtual addresses to linear addresses using identity map.
// Cannot share a CODE descriptor for both kernel and user
// because it would have to have DPL_USR, but the CPU forbids
// an interrupt from CPL=0 to DPL=3.
c = &cpus[cpunum()];
c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, DPL_USER);
c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);
// Map cpu, curproc, kmem
c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 12, 0);
lgdt((void *)(c->gdt), sizeof(c->gdt));
loadgs(SEG_KCPU << 3);
// Initialize cpu-local storage.
cpu = c;
proc = 0;
kmem = &kmems[cpunum()];
c = &cpus[cpunum()];
writegs(KDSEG);
writemsr(MSR_GS_BASE, (u64)&c->cpu);
c->cpu = c;
c->proc = NULL;
c->kmem = &kmems[cpunum()];
}
#if 0
void
printpgdir(pde_t *pgdir)
{
......
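
In long mode the base used for %gs: accesses is the full 64-bit value in MSR_GS_BASE, while a GDT descriptor can only supply a 32-bit base, so the old SEG_KCPU trick cannot point %gs at a high kernel address. initseg therefore loads an ordinary data selector into %gs first and then writes the real base with wrmsr (the order matters: reloading %gs afterwards would clobber the MSR-set base). The literal %gs:0/8/16 offsets in cpu.h only work if the three cpu-local pointers sit at those offsets from &c->cpu; a hypothetical sanity check (not part of this commit) would be:

// Hypothetical check, for illustration: verify that the cpu-local
// slots really sit at the offsets hard-coded in cpu.h.
static void
check_cpulocal_layout(struct cpu *c)
{
  if ((char *)&c->proc - (char *)&c->cpu != 8 ||
      (char *)&c->kmem - (char *)&c->cpu != 16)
    panic("cpu-local slots do not match the gs offsets in cpu.h");
}
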
......@@ -69,3 +69,25 @@ nop_pause(void)
{
__asm volatile("pause" : :);
}
static inline void
writegs(u16 v)
{
__asm volatile("movw %0, %%gs" : : "r" (v));
}
static inline u64
readmsr(u32 msr)
{
u32 hi, lo;
__asm volatile("rdmsr" : "=d" (hi), "=a" (lo) : "c" (msr));
return ((u64) lo) | (((u64) hi) << 32);
}
static inline void
writemsr(u32 msr, u64 val)
{
u32 lo = val & 0xffffffff;
u32 hi = val >> 32;
__asm volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}
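
rdmsr and wrmsr transfer the 64-bit MSR value through EDX:EAX with the MSR index in ECX, which is why these helpers split and rejoin the high and low halves. A hedged round-trip sketch (checkgsbase is a hypothetical helper, not part of the commit; it assumes cpu.h for struct cpu and MSR_GS_BASE from the defines above):

static void
checkgsbase(struct cpu *c)
{
  // Install this CPU's cpu-local base, then read it back to confirm
  // the hi/lo split in writemsr() and readmsr() is symmetric.
  writemsr(MSR_GS_BASE, (u64)&c->cpu);
  if (readmsr(MSR_GS_BASE) != (u64)&c->cpu)
    panic("MSR_GS_BASE readback mismatch");
}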