提交 e096f2f8 · 作者:Silas Boyd-Wickizer

Separate paging code from vmap code so it's easier to explore vm.c.

上级 561b41ed
......@@ -32,6 +32,7 @@ OBJS = \
fs.o \
ioapic.o \
lapic.o \
hwvm.o \
hz.o \
kalloc.o \
kbd.o \
......
#include "param.h"
#include "types.h"
#include "amd64.h"
#include "mmu.h"
#include "cpu.h"
#include "kernel.h"
#include "bits.h"
#include "spinlock.h"
#include "kalloc.h"
#include "queue.h"
#include "condvar.h"
#include "proc.h"
#include <stddef.h>
extern pml4e_t kpml4[];
// Walk one level down the page-table tree: return the next-level
// table that maps va, allocating (and publishing with a CAS) a new
// zeroed table page if it is absent and create != 0.
// Returns NULL if the entry is absent and create == 0, or if the
// allocation fails.
static pme_t*
descend(pme_t *dir, const void *va, u64 flags, int create, int level)
{
  pme_t entry;
  pme_t *next;

  // Index into this table exactly once.  The retry path below must
  // re-read the *same* slot; re-indexing from the slot pointer (as the
  // old code did by placing the label above this line) would walk off
  // into an unrelated part of the page after a lost cmpswap race.
  dir = &dir[PX(level, va)];

retry:
  entry = *dir;
  if (entry & PTE_P) {
    next = p2v(PTE_ADDR(entry));
  } else {
    if (!create)
      return NULL;
    next = (pme_t*) kalloc();
    if (!next)
      return NULL;
    memset(next, 0, PGSIZE);
    // Publish the new table atomically so concurrent walkers are safe.
    if (!cmpswap(dir, entry, v2p(next) | PTE_P | PTE_W | flags)) {
      kfree((void*) next);
      goto retry;  // lost the race: reload the entry another CPU installed
    }
  }
  return next;
}
// Return the address of the PTE in page table pgdir
// that corresponds to linear address va. If create!=0,
// create any required page table pages.
pme_t *
walkpgdir(pml4e_t *pml4, const void *va, int create)
{
  pme_t *table = pml4;
  int level;

  // Descend pml4 -> pdp -> pd; each hop may allocate an intermediate
  // table (with PTE_U so user mappings can reach the leaves).
  for (level = 3; level > 0; level--) {
    table = descend(table, va, PTE_U, create, level);
    if (table == NULL)
      return NULL;
  }
  // table is now the page table; return the slot for va.
  return &table[PX(0, va)];
}
// Set the permission bits to perm on every PTE covering [begin, end]
// (page-granular, inclusive of the page containing end); perm == 0
// clears the entries instead.  Missing page-table pages are created.
void
updatepages(pme_t *pml4, void *begin, void *end, int perm)
{
  char *a = PGROUNDDOWN(begin);
  char *last = PGROUNDDOWN(end);
  pme_t *pte;

  for (;; a += PGSIZE) {
    pte = walkpgdir(pml4, a, 1);
    if (pte != 0) {
      if (perm == 0)
        *pte = 0;
      else
        *pte = PTE_ADDR(*pte) | perm | PTE_P;
    }
    if (a == last)
      break;
  }
}
// Map [va, last] to physical addresses starting at pa in the boot
// page table kpml4, writable and present.  Boot-time only: allocation
// failures are not checked (descend would return NULL and we'd fault).
static void
pgmap(void *va, void *last, paddr pa)
{
  pme_t *table;
  int level;

  for (;;) {
    table = kpml4;
    for (level = 3; level > 0; level--)
      table = descend(table, va, 0, 1, level);
    table[PX(0, va)] = pa | PTE_W | PTE_P;
    if (va == last)
      break;
    va += PGSIZE;
    pa += PGSIZE;
  }
}
// set up a page table to get off the ground
void
initpg(char* (*alloc)(void))
{
  // Direct-map the first 4GB of physical memory at PBASE.
  // boot.S already maps the first 1GB at KBASE and has us running on
  // kpml4, so only the PBASE window needs building here.
  pgmap((void *) PBASE, (void *) (PBASE + (1UL << 32)), 0);
}
// Set up kernel part of a page table.
pml4e_t*
setupkvm(void)
{
pml4e_t *pml4;
int k;
if((pml4 = (pml4e_t*)kalloc()) == 0)
return 0;
k = PX(3, PBASE);
memset(&pml4[0], 0, 8*k);
memmove(&pml4[k], &kpml4[k], 8*(512-k));
return pml4;
}
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  // Point CR3 at the boot/kernel pml4.
  lcr3(v2p(kpml4));
}
// Switch TSS and h/w page table to correspond to process p.
// Panics if p has no vmap/pml4.  Interrupts are disabled across the
// GDT/TSS/CR3 updates (pushcli/popcli) so they are not interleaved.
void
switchuvm(struct proc *p)
{
// Per-CPU TSS holds the kernel stack pointer and I/O-permission map base.
u64 base = (u64) &mycpu()->ts;
pushcli();
// Install the 16-byte 64-bit TSS descriptor: low half, then high half.
mycpu()->gdt[TSSSEG>>3] = (struct segdesc)
SEGDESC(base, (sizeof(mycpu()->ts)-1), SEG_P|SEG_TSS64A);
mycpu()->gdt[(TSSSEG>>3)+1] = (struct segdesc) SEGDESCHI(base);
// RSP0: stack the CPU switches to on a user->kernel transition.
mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
mycpu()->ts.iomba = (u16)offsetof(struct taskstate, iopb);
// Load the task register after the descriptor is in place.
ltr(TSSSEG);
if(p->vmap == 0 || p->vmap->pml4 == 0)
panic("switchuvm: no vmap/pml4");
lcr3(v2p(p->vmap->pml4)); // switch to new address space
popcli();
}
// Recursively free a page-table page: depth-first release of every
// present child table (level levels remain below this one), then the
// page itself.
static void
freepm(pme_t *pm, int level)
{
  if (level > 0) {
    int slot;
    for (slot = 0; slot < 512; slot++)
      if (pm[slot] & PTE_P)
        freepm(p2v(PTE_ADDR(pm[slot])), level - 1);
  }
  kfree(pm);
}
// Free a page table and all the physical memory pages
// in the user part.
void
freevm(pml4e_t *pml4)
{
  int nuser;
  int slot;

  if (pml4 == 0)
    panic("freevm: no pgdir");
  // Only slots below PX(3, PBASE) belong to the user part; the kernel
  // half is shared with kpml4 and must survive.
  nuser = PX(3, PBASE);
  for (slot = 0; slot < nuser; slot++)
    if (pml4[slot] & PTE_P)
      freepm(p2v(PTE_ADDR(pml4[slot])), 2);
  kfree(pml4);
}
......@@ -81,6 +81,11 @@ int dirlink(struct inode*, char*, u32);
void dir_init(struct inode *dp);
void dir_flush(struct inode *dp);
// hwvm.c
void freevm(pml4e_t*);
pml4e_t* setupkvm(void);
pme_t * walkpgdir(pml4e_t*, const void*, int);
// hz.c
void microdelay(u64);
......
......@@ -12,68 +12,6 @@
#include "proc.h"
#include <stddef.h>
extern char data[]; // defined in data.S
extern pml4e_t kpml4[];
extern char* pgalloc(void);
// Walk one level down the page-table tree: return the next-level
// table that maps va, allocating (and publishing with a CAS) a new
// zeroed table page if it is absent and create != 0.
// Returns NULL if the entry is absent and create == 0, or if the
// allocation fails.
static pme_t*
descend(pme_t *dir, const void *va, u64 flags, int create, int level)
{
  pme_t entry;
  pme_t *next;

  // Index into this table exactly once.  The retry path below must
  // re-read the *same* slot; re-indexing from the slot pointer (as the
  // old code did by placing the label above this line) would walk off
  // into an unrelated part of the page after a lost cmpswap race.
  dir = &dir[PX(level, va)];

retry:
  entry = *dir;
  if (entry & PTE_P) {
    next = p2v(PTE_ADDR(entry));
  } else {
    if (!create)
      return NULL;
    next = (pme_t*) pgalloc();
    if (!next)
      return NULL;
    memset(next, 0, PGSIZE);
    // Publish the new table atomically so concurrent walkers are safe.
    if (!cmpswap(dir, entry, v2p(next) | PTE_P | PTE_W | flags)) {
      kfree((void*) next);
      goto retry;  // lost the race: reload the entry another CPU installed
    }
  }
  return next;
}
// Map [va, last] to physical addresses starting at pa in the boot
// page table kpml4, writable and present.  Boot-time only: allocation
// failures are not checked (descend would return NULL and we'd fault).
static void
pgmap(void *va, void *last, paddr pa)
{
  pme_t *table;
  int level;

  for (;;) {
    table = kpml4;
    for (level = 3; level > 0; level--)
      table = descend(table, va, 0, 1, level);
    table[PX(0, va)] = pa | PTE_W | PTE_P;
    if (va == last)
      break;
    va += PGSIZE;
    pa += PGSIZE;
  }
}
// set up a page table to get off the ground
void
initpg(char* (*alloc)(void))
{
  // Direct-map the first 4GB of physical memory at PBASE.
  // boot.S already maps the first 1GB at KBASE and has us running on
  // kpml4, so only the PBASE window needs building here.
  pgmap((void *) PBASE, (void *) (PBASE + (1UL << 32)), 0);
}
// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
......@@ -90,47 +28,6 @@ inittls(void)
c->kmem = &kmems[cpunum()];
}
// Set up kernel part of a page table.
static pml4e_t*
setupkvm(void)
{
pml4e_t *pml4;
int k;
if((pml4 = (pml4e_t*)kalloc()) == 0)
return 0;
k = PX(3, PBASE);
memset(&pml4[0], 0, 8*k);
memmove(&pml4[k], &kpml4[k], 8*(512-k));
return pml4;
}
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  // Point CR3 at the boot/kernel pml4.
  lcr3(v2p(kpml4));
}
// Switch TSS and h/w page table to correspond to process p.
// Panics if p has no vmap/pml4.  Interrupts are disabled across the
// GDT/TSS/CR3 updates (pushcli/popcli) so they are not interleaved.
void
switchuvm(struct proc *p)
{
// Per-CPU TSS holds the kernel stack pointer and I/O-permission map base.
u64 base = (u64) &mycpu()->ts;
pushcli();
// Install the 16-byte 64-bit TSS descriptor: low half, then high half.
mycpu()->gdt[TSSSEG>>3] = (struct segdesc)
SEGDESC(base, (sizeof(mycpu()->ts)-1), SEG_P|SEG_TSS64A);
mycpu()->gdt[(TSSSEG>>3)+1] = (struct segdesc) SEGDESCHI(base);
// RSP0: stack the CPU switches to on a user->kernel transition.
mycpu()->ts.rsp[0] = (u64) myproc()->kstack + KSTACKSIZE;
mycpu()->ts.iomba = (u16)offsetof(struct taskstate, iopb);
// Load the task register after the descriptor is in place.
ltr(TSSSEG);
if(p->vmap == 0 || p->vmap->pml4 == 0)
panic("switchuvm: no vmap/pml4");
lcr3(v2p(p->vmap->pml4)); // switch to new address space
popcli();
}
static struct vma *
vma_alloc(void)
{
......@@ -160,43 +57,6 @@ vma_free(void *p)
kmfree(e);
}
// Recursively free a page-table page: depth-first release of every
// present child table (level levels remain below this one), then the
// page itself.
static void
freepm(pme_t *pm, int level)
{
  if (level > 0) {
    int slot;
    for (slot = 0; slot < 512; slot++)
      if (pm[slot] & PTE_P)
        freepm(p2v(PTE_ADDR(pm[slot])), level - 1);
  }
  kfree(pm);
}
// Free a page table and all the physical memory pages
// in the user part.
static void
freevm(pml4e_t *pml4)
{
  int nuser;
  int slot;

  if (pml4 == 0)
    panic("freevm: no pgdir");
  // Only slots below PX(3, PBASE) belong to the user part; the kernel
  // half is shared with kpml4 and must survive.
  nuser = PX(3, PBASE);
  for (slot = 0; slot < nuser; slot++)
    if (pml4[slot] & PTE_P)
      freepm(p2v(PTE_ADDR(pml4[slot])), 2);
  kfree(pml4);
}
#ifdef TREE
struct state {
int share;
......@@ -441,48 +301,6 @@ copyout(struct vmap *vmap, uptr va, void *p, u64 len)
return 0;
}
// Return the address of the PTE in page table pgdir
// that corresponds to linear address va. If create!=0,
// create any required page table pages.
static pme_t *
walkpgdir(pml4e_t *pml4, const void *va, int create)
{
  pme_t *table = pml4;
  int level;

  // Descend pml4 -> pdp -> pd; each hop may allocate an intermediate
  // table (with PTE_U so user mappings can reach the leaves).
  for (level = 3; level > 0; level--) {
    table = descend(table, va, PTE_U, create, level);
    if (table == NULL)
      return NULL;
  }
  // table is now the page table; return the slot for va.
  return &table[PX(0, va)];
}
// Set the permission bits to perm on every PTE covering [begin, end]
// (page-granular, inclusive of the page containing end); perm == 0
// clears the entries instead.  Missing page-table pages are created.
void
updatepages(pme_t *pml4, void *begin, void *end, int perm)
{
  char *a = PGROUNDDOWN(begin);
  char *last = PGROUNDDOWN(end);
  pme_t *pte;

  for (;; a += PGSIZE) {
    pte = walkpgdir(pml4, a, 1);
    if (pte != 0) {
      if (perm == 0)
        *pte = 0;
      else
        *pte = PTE_ADDR(*pte) | perm | PTE_P;
    }
    if (a == last)
      break;
  }
}
struct vmnode *
vmn_copy(struct vmnode *n)
{
......
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册或登录后发表评论