Commit 0fae9896, authored by Silas Boyd-Wickizer

Rejigger kalloc.c, add a lame block/slab allocator.

Parent 12d384cb
@@ -16,7 +16,18 @@ static u64 nmem;
 static u64 membytes;
 struct kmem kmems[NCPU];
-struct kmem kstacks[NCPU];
+static struct kmem slabmem[][NCPU] = {
+  [slab_stack][0 ... NCPU-1] = {
+    .name = " kstack",
+    .size = KSTACKSIZE,
+    .ninit = CPUKSTACKS,
+  },
+  [slab_perf][0 ... NCPU-1] = {
+    .name = " kperf",
+    .size = PERFSIZE,
+    .ninit = 1,
+  },
+};
 extern char end[]; // first address after kernel loaded from ELF file
 char *newend;
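The new slabmem table relies on GCC's designated range initializers ([first ... last] =) and leaves its outer dimension implicit, so each slab type is one table row indexed by its enum constant; the leading space in each name is the slot that initkalloc() later overwrites with the owning CPU's digit. A standalone user-space sketch of the same idiom, with made-up names (kmem_demo, slabmem_demo) and illustrative sizes standing in for the kernel's types:

// Standalone sketch (not part of the commit). The [a ... b] range designator
// is a GNU extension, so build with gcc or clang.
#include <stdio.h>

#define NCPU 4
#define NELEM(x) (sizeof(x) / sizeof((x)[0]))

typedef enum { slab_stack, slab_perf } slab_t;

struct kmem_demo {
  const char *name;
  unsigned long size;
  unsigned long ninit;
};

// The outer dimension is deduced from the highest designated index (slab_perf),
// which is what lets initkalloc() iterate with NELEM(slabmem).
static struct kmem_demo slabmem_demo[][NCPU] = {
  [slab_stack][0 ... NCPU-1] = { .name = " kstack", .size = 4096,    .ninit = 2 },
  [slab_perf][0 ... NCPU-1]  = { .name = " kperf",  .size = 1 << 20, .ninit = 1 },
};

int main(void)
{
  for (unsigned i = 0; i < NELEM(slabmem_demo); i++)   // one row per slab type
    for (int c = 0; c < NCPU; c++)                     // one entry per CPU
      printf("slab %u cpu %d: '%s' size %lu ninit %lu\n", i, c,
             slabmem_demo[i][c].name, slabmem_demo[i][c].size,
             slabmem_demo[i][c].ninit);
  return 0;
}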
@@ -189,11 +200,10 @@ kalloc(void)
   return kmemalloc(kmems);
 }
 
-// Allocate KSTACKSIZE bytes.
-char *
-ksalloc(void)
+void *
+ksalloc(slab_t slab)
 {
-  return kmemalloc(kstacks);
+  return kmemalloc(slabmem[slab]);
 }
 
 // Memory allocator by Kernighan and Ritchie,
@@ -221,6 +231,21 @@ kminit(void)
   }
 }
 
+void
+slabinit(struct kmem *k, char **p, u64 *off)
+{
+  for (int i = 0; i < k->ninit; i++) {
+    if (*p == (void *)-1)
+      panic("slabinit: memnext");
+    // XXX(sbw) handle this condition
+    if (memsize(p) < k->size)
+      panic("slabinit: memsize");
+    kfree_pool(k, *p);
+    *p = memnext(*p, k->size);
+    *off = *off+k->size;
+  }
+}
+
 // Initialize free list of physical pages.
 void
 initkalloc(u64 mbaddr)
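slabinit() takes the physical-memory cursor and the running offset by pointer: it hands k->ninit chunks of k->size bytes to kfree_pool() and leaves *p and *off pointing just past what it consumed, so the next call, and the page-allocator loop after it, picks up where this one stopped. Below is a user-space sketch of just that calling convention; a flat malloc'd arena stands in for the machine's memory map (no memnext/memsize walk here), and give_to_pool() is a made-up stand-in for kfree_pool().

#include <stdio.h>
#include <stdlib.h>

struct pool { const char *name; unsigned long size; unsigned long ninit; };

// Stand-in for kfree_pool(): just report the chunk being seeded.
static void give_to_pool(struct pool *k, char *chunk)
{
  printf("%s: seeding chunk at %p (%lu bytes)\n", k->name, (void *)chunk, k->size);
}

// Mirrors slabinit()'s calling convention: *p and *off are advanced so the
// next caller continues carving where this pool left off.
static void pool_init(struct pool *k, char **p, unsigned long *off)
{
  for (unsigned long i = 0; i < k->ninit; i++) {
    give_to_pool(k, *p);
    *p = *p + k->size;
    *off = *off + k->size;
  }
}

int main(void)
{
  struct pool stacks = { "stack", 4096, 2 };
  struct pool perf   = { "perf",  8192, 1 };
  char *arena = malloc(2 * 4096 + 8192);   // flat arena instead of the e820 map
  char *p = arena;
  unsigned long off = 0;

  pool_init(&stacks, &p, &off);
  pool_init(&perf, &p, &off);
  printf("slabs took %lu bytes; cursor advanced %ld bytes\n", off, (long)(p - arena));
  free(arena);
  return 0;
}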
@@ -238,11 +263,12 @@ initkalloc(u64 mbaddr)
     kmems[c].size = PGSIZE;
   }
 
-  for (int c = 0; c < NCPU; c++) {
-    kstacks[c].name[0] = (char) c + '0';
-    safestrcpy(kstacks[c].name+1, "kstack", MAXNAME-1);
-    initlock(&kstacks[c].lock, kstacks[c].name);
-    kstacks[c].size = KSTACKSIZE;
+  for (int i = 0; i < NELEM(slabmem); i++) {
+    for (int c = 0; c < NCPU; c++) {
+      slabmem[i][c].name[0] = (char) c + '0';
+      initlock(&slabmem[i][c].lock,
+               slabmem[i][c].name);
+    }
   }
 
   cprintf("%lu mbytes\n", membytes / (1<<20));
@@ -255,16 +281,9 @@ initkalloc(u64 mbaddr)
   p = (char*)PGROUNDUP((uptr)newend);
   k = (((uptr)p) - KBASE);
   for (int c = 0; c < NCPU; c++) {
-    // Fill the stack allocator
-    for (int i = 0; i < CPUKSTACKS; i++, k += KSTACKSIZE) {
-      if (p == (void *)-1)
-        panic("initkalloc: e820next");
-      // XXX(sbw) handle this condition
-      if (memsize(p) < KSTACKSIZE)
-        panic("initkalloc: e820size");
-      kfree_pool(&kstacks[c], p);
-      p = memnext(p, KSTACKSIZE);
-    }
+    // Fill slab allocators
+    for (int i = 0; i < NELEM(slabmem); i++)
+      slabinit(&slabmem[i][c], &p, &k);
 
     // The rest goes to the page allocator
     for (; k != n; k += PGSIZE, p = memnext(p, PGSIZE)) {
...
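With the table-driven loop, each CPU seeds the slab allocators with CPUKSTACKS chunks of KSTACKSIZE bytes plus one PERFSIZE buffer before the remainder of its share goes to the page allocator. A quick back-of-the-envelope sketch of that reservation; KSTACKSIZE and CPUKSTACKS are not shown in this diff, so the values below are assumptions, while 16 MB is the HW_qemu PERFSIZE from param.h:

#include <stdio.h>

int main(void)
{
  unsigned long long kstacksize = 2 * 4096;    // assumed, not from this diff
  unsigned long long cpukstacks = 8;           // assumed, not from this diff
  unsigned long long perfsize   = 16ull << 20; // HW_qemu PERFSIZE
  // Per CPU: cpukstacks stack chunks plus one perf buffer, as listed in slabmem[].
  unsigned long long percpu = cpukstacks * kstacksize + 1 * perfsize;
  printf("per-CPU slab reservation: %llu bytes (~%llu MB)\n", percpu, percpu >> 20);
  return 0;
}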
@@ -4,9 +4,10 @@ struct run {
 struct kmem {
   char name[MAXNAME];
+  u64 size;
+  u64 ninit;
   struct spinlock lock;
   struct run *freelist;
-  u64 size;
   u64 nfree;
 } __mpalign__;
...
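struct kmem now records, per slab type and per CPU, the fixed chunk size and how many chunks to seed at boot (ninit), alongside the lock and the run freelist it already had. For orientation, here is a user-space sketch of the underlying run/freelist pattern, with locking and the per-CPU dimension omitted and illustrative names (pool_demo, pool_free, pool_alloc):

#include <stdio.h>
#include <stdlib.h>

struct run { struct run *next; };

struct pool_demo {
  const char *name;
  unsigned long size;     // fixed chunk size, like kmem.size
  struct run *freelist;   // LIFO list of free chunks
  unsigned long nfree;
};

static void pool_free(struct pool_demo *k, void *chunk)
{
  struct run *r = chunk;  // the chunk itself doubles as the list node
  r->next = k->freelist;
  k->freelist = r;
  k->nfree++;
}

static void *pool_alloc(struct pool_demo *k)
{
  struct run *r = k->freelist;
  if (r == NULL)
    return NULL;          // pool exhausted
  k->freelist = r->next;
  k->nfree--;
  return r;
}

int main(void)
{
  struct pool_demo stacks = { " kstack", 4096, NULL, 0 };
  for (int i = 0; i < 4; i++)              // seed the pool, as slabinit() does
    pool_free(&stacks, malloc(stacks.size));
  void *a = pool_alloc(&stacks);
  printf("%s: got %p, %lu chunks left\n", stacks.name, a, stacks.nfree);
  return 0;
}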
@@ -114,8 +114,12 @@ void iderw(struct buf*);
 void ioapicenable(int irq, int cpu);
 
 // kalloc.c
+typedef enum {
+  slab_stack,
+  slab_perf,
+} slab_t;
 char* kalloc(void);
-char* ksalloc(void);
+void* ksalloc(slab_t);
 void kfree(void *);
 void* kmalloc(u64);
 void kmfree(void*);
...
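ksalloc() now takes a slab_t naming the pool and returns void* rather than char*. Because a void* result converts implicitly to any object pointer type in C, the call sites further down (bootothers, allocproc) keep assigning into char* variables without casts. A minimal standalone sketch of that point; ksalloc_demo() and its sizes are made up:

#include <stdio.h>
#include <stdlib.h>

typedef enum { slab_stack, slab_perf } slab_t;

// Stand-in for ksalloc(): enum in, untyped pointer out.
static void *ksalloc_demo(slab_t slab)
{
  return malloc(slab == slab_stack ? 4096 : 1 << 20);
}

int main(void)
{
  char *kstack = ksalloc_demo(slab_stack);  // implicit void* -> char*, no cast
  kstack[0] = 0;                            // usable as a plain byte buffer
  printf("stack at %p\n", (void *)kstack);
  free(kstack);
  return 0;
}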
@@ -62,7 +62,7 @@ bootothers(void)
     // Tell bootother.S what stack to use and the address of apstart;
     // it expects to find these two addresses stored just before
     // its first instruction.
-    stack = ksalloc();
+    stack = ksalloc(slab_stack);
     *(u32*)(code-4) = (u32)v2p(&apstart);
     *(u64*)(code-12) = (u64)stack + KSTACKSIZE;
...
@@ -19,14 +19,17 @@
 #define NCPU 16 // maximum number of CPUs
 #define MTRACE 0
 #define WQENABLE 0 // Enable work queue
+#define PERFSIZE 1<<30ull
 #elif defined(HW_qemu)
 #define NCPU 4 // maximum number of CPUs
 #define MTRACE 0
 #define WQENABLE 0 // Enable work queue
+#define PERFSIZE 16<<20ull
 #elif defined(HW_ud0)
 #define NCPU 4 // maximum number of CPUs
 #define MTRACE 0
 #define WQENABLE 0 // Enable work queue
+#define PERFSIZE 512<<20ull
 #else
 #error "Unknown HW"
 #endif
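One thing to note about the new PERFSIZE macros: the expansion (for example 16<<20ull) is unparenthesized, and the ull suffix sits on the shift count, so the result's type is still that of the int on the left. The values defined here are fine as-is, but arithmetic wrapped around such an expansion can surprise. A standalone illustration; PERFSIZE_DEMO is a made-up name:

#include <stdio.h>

#define PERFSIZE_DEMO 16<<20ull   // same shape as the new macros

int main(void)
{
  // '+' binds tighter than '<<', so the macro expands to 16 << (20ull + 1):
  unsigned long long bumped = PERFSIZE_DEMO + 1;
  unsigned long long parenthesized = (16ull << 20) + 1;
  printf("PERFSIZE_DEMO + 1 = %llu\n", bumped);         // 33554432
  printf("(16ull << 20) + 1 = %llu\n", parenthesized);  // 16777217
  // The suffix on the shift count does not widen the result; its type is that
  // of the left operand (int), so sizeof is typically 4 here.
  printf("sizeof(PERFSIZE_DEMO) = %zu\n", sizeof(PERFSIZE_DEMO));
  return 0;
}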
@@ -201,7 +201,7 @@ allocproc(void)
     panic("allocproc: ns_insert");
 
   // Allocate kernel stack if possible.
-  if((p->kstack = ksalloc()) == 0){
+  if((p->kstack = ksalloc(slab_stack)) == 0){
     if (ns_remove(nspid, KI(p->pid), p) == 0)
       panic("allocproc: ns_remove");
     gc_delayed(p, kmfree);
...