Commit 524b494f authored by Frans Kaashoek

Merge branch 'scale-amd64' of ssh://amsterdam.csail.mit.edu/home/am0/6.828/xv6 into scale-amd64

Conflicts: kernel.h
@@ -32,6 +32,7 @@ OBJS = \
condvar.o \
console.o \
crange.o \
e1000.o \
exec.o \
file.o \
fs.o \
@@ -44,7 +45,9 @@ OBJS = \
main.o \
memide.o \
mp.o \
net.o \
ns.o \
pci.o \
picirq.o \
pipe.o \
proc.o \
@@ -84,6 +87,10 @@ UPROGS= \
_halt
UPROGS := $(addprefix $(O)/, $(UPROGS))
all: $(O)/kernel
include net.mk
$(O)/kernel: $(O) $(O)/boot.o $(OBJS)
@echo " LD $@"
$(Q)$(LD) $(LDFLAGS) -T kernel.ld -z max-page-size=4096 -e start \
@@ -141,7 +148,9 @@ $(O)/mscan.kern: $(O)/kernel
##
## qemu
##
-QEMUOPTS = -smp $(QEMUSMP) -m 512 -serial mon:stdio -nographic
+QEMUOPTS = -smp $(QEMUSMP) -m 512 -serial mon:stdio -nographic \
+	-net user -net nic,model=e1000 \
+	-net dump,file=qemu.pcap
qemu: $(O)/kernel
$(QEMU) $(QEMUOPTS) -kernel $(O)/kernel
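# The -net dump option records every frame the guest sends or receives
# into qemu.pcap; a stock capture tool on the host (for example
# `tcpdump -nn -e -r qemu.pcap`) can replay it to check what the e1000
# driver actually put on the wire.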
......
@@ -28,6 +28,15 @@ inb(u16 port)
return data;
}
static inline u32
inl(u16 port)
{
u32 data = 0;
__asm volatile("inl %w1,%0" : "=a" (data) : "d" (port));
return data;
}
static inline void
outb(u16 port, u8 data)
{
@@ -35,6 +44,12 @@ outb(u16 port, u8 data)
}
static inline void
outl(u16 port, u32 data)
{
__asm volatile("outl %0,%w1" : : "a" (data), "d" (port));
}
static inline void
stosb(void *addr, int data, int cnt)
{
__asm volatile("cld; rep stosb" :
......
@@ -134,9 +134,9 @@ tramp64:
.comm stack, STACK
# Page tables. See section 4.5 of 253668.pdf.
-# We map the first GB of physical memory at 0 and at -2 GB (or 2 GB before
-# the end of physical memory). At boot time we are using the mapping at 0
-# but during ordinary execution we use the high mapping.
+# We map the first GB of physical memory at 0 and at KBASE. At boot
+# time we are using the mapping at 0 but during ordinary execution we
+# use the high mapping.
# The intent is that after bootstrap the kernel can expand this mapping
# to cover all the available physical memory.
# This would be easier if we could use the PS bit to create GB-sized entries
......
@@ -8,15 +8,60 @@
#include "kernel.h"
#include "cpu.h"

-void
-initcondvar(struct condvar *cv, char *n)
+struct spinlock tickslock __mpalign__;
+struct condvar cv_ticks __mpalign__;
+u64 ticks __mpalign__;
+
+LIST_HEAD(sleepers, proc) sleepers __mpalign__;
+struct spinlock sleepers_lock;
+
+// Make p runnable: pull it off its condvar's waiter list and hand it
+// to the scheduler. Caller holds p->lock and p->oncv->lock.
+static void
+wakeup(struct proc *p)
{
-  initlock(&cv->lock, n);
-  cv->waiters = 0;
+  LIST_REMOVE(p, cv_waiters);
+  p->oncv = 0;
+  addrun(p);
+  p->state = RUNNABLE;
}

void
-cv_sleep(struct condvar *cv, struct spinlock *lk)
+cv_tick(void)
+{
+  struct proc *p, *tmp;
+  int again;
+  u64 now;
+
+  acquire(&tickslock);
+  ticks++;
+  cv_wakeup(&cv_ticks);
+  release(&tickslock);
+
+  now = nsectime();
+  again = 0;
+  do {
+    acquire(&sleepers_lock);
+    LIST_FOREACH_SAFE(p, &sleepers, cv_sleep, tmp) {
+      if (p->cv_wakeup <= now) {
+        // cv_wakeup() takes the condvar lock before the proc lock,
+        // the opposite order from here, so try the locks and retry
+        // the scan rather than risk a deadlock.
+        if (tryacquire(&p->lock)) {
+          // wakeup() clears p->oncv; save the condvar pointer now so
+          // its lock can still be released afterwards.
+          struct condvar *cv = p->oncv;
+          if (tryacquire(&cv->lock)) {
+            LIST_REMOVE(p, cv_sleep);
+            p->cv_wakeup = 0;
+            wakeup(p);
+            release(&p->lock);
+            release(&cv->lock);
+            continue;
+          } else {
+            release(&p->lock);
+          }
+        }
+        again = 1;
+      }
+    }
+    release(&sleepers_lock);
+  } while (again);
+}
+
+void cv_sleepto(struct condvar *cv, struct spinlock *lk, u64 timeout)
{
  if(myproc() == 0)
    panic("sleep");
@@ -31,44 +76,61 @@ cv_sleep(struct condvar *cv, struct spinlock *lk)
  acquire(&myproc()->lock);
-  if(myproc()->cv_next || myproc()->oncv)
-    panic("cv_sleep cv_next");
-  myproc()->cv_next = cv->waiters;
-  cv->waiters = myproc();
-  myproc()->state = SLEEPING;
+  if(myproc()->oncv)
+    panic("cv_sleep oncv");
+  LIST_INSERT_HEAD(&cv->waiters, myproc(), cv_waiters);
  myproc()->oncv = cv;
+  myproc()->state = SLEEPING;
+
+  if (timeout) {
+    acquire(&sleepers_lock);
+    myproc()->cv_wakeup = timeout;
+    LIST_INSERT_HEAD(&sleepers, myproc(), cv_sleep);
+    release(&sleepers_lock);
+  }
  release(&cv->lock);
  sched();
  release(&myproc()->lock);

  // Reacquire original lock.
  acquire(lk);
}

+void
+cv_sleep(struct condvar *cv, struct spinlock *lk)
+{
+  cv_sleepto(cv, lk, 0);
+}
+
// Wake up all processes sleeping on this condvar.
void
cv_wakeup(struct condvar *cv)
{
-  // XXX race with cv_sleep()
-  //  if (!cv->waiters)
-  //    return;
+  struct proc *p, *tmp;
+
  acquire(&cv->lock);
-  while(cv->waiters) {
-    struct proc *p = cv->waiters;
+  LIST_FOREACH_SAFE(p, &cv->waiters, cv_waiters, tmp) {
    acquire(&p->lock);
    if(p->state != SLEEPING || p->oncv != cv)
      panic("cv_wakeup");
-    struct proc *nxt = p->cv_next;
-    p->cv_next = 0;
-    p->oncv = 0;
-    addrun(p);
-    p->state = RUNNABLE;
+    if (p->cv_wakeup) {
+      acquire(&sleepers_lock);
+      LIST_REMOVE(p, cv_sleep);
+      p->cv_wakeup = 0;
+      release(&sleepers_lock);
+    }
+    wakeup(p);
    release(&p->lock);
-    cv->waiters = nxt;
  }
  release(&cv->lock);
}

+void
+initcondvar(struct condvar *cv, char *n)
+{
+  initlock(&cv->lock, n);
+  LIST_INIT(&cv->waiters);
+}
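The commit stops short of a convenience wrapper for timed sleeps. As a rough sketch of how one could be layered on these primitives (hypothetical, not part of this diff), using the exported tickslock/cv_ticks pair and the fact that cv_sleepto() takes an absolute deadline compared against nsectime():

// Hypothetical helper, not in this commit: sleep at least nsec
// nanoseconds. cv_ticks is broadcast every tick by cv_tick(), and the
// sleepers scan also wakes us once the deadline passes, so re-check
// the clock and loop on early wakeups.
static void
nsleep(u64 nsec)
{
  u64 until = nsectime() + nsec;

  acquire(&tickslock);
  while (nsectime() < until)
    cv_sleepto(&cv_ticks, &tickslock, until);
  release(&tickslock);
}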
#include "queue.h"
struct condvar {
struct spinlock lock;
-  struct proc *waiters;
+  LIST_HEAD(waiters, proc) waiters;
};
#include "types.h"
#include "amd64.h"
#include "kernel.h"
#include "pci.h"
#include "e1000reg.h"
#define TX_RING_SIZE 64
#define RX_RING_SIZE 64
int e1000irq;
static struct {
u32 membase;
u32 iobase;
u16 pcidevid;
u32 txclean;
u32 txinuse;
u32 rxclean;
u32 rxuse;
struct wiseman_txdesc txd[TX_RING_SIZE] __attribute__((aligned (16)));
struct wiseman_rxdesc rxd[RX_RING_SIZE] __attribute__((aligned (16)));
} e1000;
static inline u32
erd(u32 reg)
{
paddr pa = e1000.membase + reg;
volatile u32 *ptr = p2v(pa);
return *ptr;
}
static inline void
ewr(u32 reg, u32 val)
{
paddr pa = e1000.membase + reg;
volatile u32 *ptr = p2v(pa);
*ptr = val;
}
static int
eeprom_eerd_read(u16 off)
{
u32 reg;
int x;
// [E1000 5.3.1] Software EEPROM access
ewr(WMREG_EERD, (off<<EERD_ADDR_SHIFT) | EERD_START);
for (x = 0; x < 5; x++) {
reg = erd(WMREG_EERD);
if (reg & EERD_DONE)
return (reg&EERD_DATA_MASK) >> EERD_DATA_SHIFT;
microdelay(50000);
}
return -1;
}
static int
eeprom_read(u16 *buf, int off, int count)
{
for (int i = 0; i < count; i++) {
int r = eeprom_eerd_read(off+i);
if (r < 0) {
cprintf("eeprom_read: cannot read\n");
return -1;
}
buf[i] = r;
}
return 0;
}
int
e1000tx(void *buf, u32 len)
{
struct wiseman_txdesc *desc;
u32 tail;
// WMREG_TDT should only equal WMREG_TDH when we have
// nothing to transmit. Therefore, we can accommodate
// TX_RING_SIZE-1 buffers.
if (e1000.txinuse == TX_RING_SIZE-1) {
cprintf("TX ring overflow\n");
return -1;
}
tail = erd(WMREG_TDT);
desc = &e1000.txd[tail];
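// e1000reset() seeds every TX descriptor with WTX_ST_DD, and the
// hardware sets DD again after sending (WTX_CMD_RS below requests
// that), so a clear DD bit means this slot is still in flight.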
if (!(desc->wtx_fields.wtxu_status & WTX_ST_DD))
panic("oops");
desc->wtx_addr = v2p(buf);
desc->wtx_cmdlen = len | WTX_CMD_RS | WTX_CMD_EOP | WTX_CMD_IFCS;
memset(&desc->wtx_fields, 0, sizeof(desc->wtx_fields));
ewr(WMREG_TDT, (tail+1) % TX_RING_SIZE);
e1000.txinuse++;
return 0;
}
static void
cleantx(void)
{
struct wiseman_txdesc *desc;
void *va;
while (e1000.txinuse) {
desc = &e1000.txd[e1000.txclean];
if (!(desc->wtx_fields.wtxu_status & WTX_ST_DD))
break;
va = p2v(desc->wtx_addr);
netfree(va);
desc->wtx_fields.wtxu_status = 0;
e1000.txclean = (e1000.txclean+1) % TX_RING_SIZE;
e1000.txinuse--;
}
}
static void
allocrx(void)
{
struct wiseman_rxdesc *desc;
void *buf;
u32 i;
i = erd(WMREG_RDT);
desc = &e1000.rxd[i];
if (desc->wrx_status & WRX_ST_DD)
panic("allocrx");
buf = netalloc();
if (buf == NULL)
panic("Oops");
desc->wrx_addr = v2p(buf);
ewr(WMREG_RDT, (i+1) % RX_RING_SIZE);
}
static void
cleanrx(void)
{
struct wiseman_rxdesc *desc;
void *va;
u16 len;
u32 i;
i = e1000.rxclean;
desc = &e1000.rxd[i];
while (desc->wrx_status & WRX_ST_DD) {
va = p2v(desc->wrx_addr);
len = desc->wrx_len;
desc->wrx_status = 0;
allocrx();
netrx(va, len);
i = (i+1) % RX_RING_SIZE;
desc = &e1000.rxd[i];
}
e1000.rxclean = i;
}
void
e1000intr(void)
{
u32 icr = erd(WMREG_ICR);
while (icr & (ICR_TXDW|ICR_RXO|ICR_RXT0)) {
if (icr & ICR_TXDW)
cleantx();
if (icr & ICR_RXT0)
cleanrx();
if (icr & ICR_RXO)
panic("ICR_RXO");
icr = erd(WMREG_ICR);
}
}
static void
e1000reset(void)
{
u32 ctrl;
paddr tpa;
paddr rpa;
ctrl = erd(WMREG_CTRL);
// [E1000 13.4.1] Assert PHY_RESET then delay as much as 10 msecs
// before clearing PHY_RESET.
ewr(WMREG_CTRL, ctrl|CTRL_PHY_RESET);
microdelay(10000);
ewr(WMREG_CTRL, ctrl);
// [E1000 13.4.1] Delay 1 usec after reset
ewr(WMREG_CTRL, ctrl|CTRL_RST);
microdelay(1);
// [E1000 13.4.41] Transmit Interrupt Delay Value of 1 usec.
// A value of 0 is not allowed. Enabled on a per-TX descriptor basis.
ewr(WMREG_TIDV, 1);
// [E1000 13.4.44] Delay TX interrupts a max of 1 usec.
ewr(WMREG_TADV, 1);
for (int i = 0; i < TX_RING_SIZE; i++)
e1000.txd[i].wtx_fields.wtxu_status = WTX_ST_DD;
// [E1000 14.5] Transmit Initialization
tpa = v2p(e1000.txd);
ewr(WMREG_TDBAH, tpa >> 32);
ewr(WMREG_TDBAL, tpa & 0xffffffff);
ewr(WMREG_TDLEN, sizeof(e1000.txd));
ewr(WMREG_TDH, 0);
ewr(WMREG_TDT, 0);
ewr(WMREG_TCTL, TCTL_EN|TCTL_PSP|TCTL_CT(0x10)|TCTL_COLD(0x40));
ewr(WMREG_TIPG, TIPG_IPGT(10)|TIPG_IPGR1(8)|TIPG_IPGR2(6));
for (int i = 0; i < RX_RING_SIZE>>1; i++) {
void *buf = netalloc();
e1000.rxd[i].wrx_addr = v2p(buf);
}
rpa = v2p(e1000.rxd);
ewr(WMREG_RDBAH, rpa >> 32);
ewr(WMREG_RDBAL, rpa & 0xffffffff);
ewr(WMREG_RDLEN, sizeof(e1000.rxd));
ewr(WMREG_RDH, 0);
ewr(WMREG_RDT, RX_RING_SIZE>>1);
ewr(WMREG_RDTR, 0);
ewr(WMREG_RADV, 0);
ewr(WMREG_RCTL,
RCTL_EN | RCTL_RDMTS_1_2 | RCTL_DPF | RCTL_BAM | RCTL_2k);
// [E1000 13.4.20]
ewr(WMREG_IMC, ~0);
ewr(WMREG_IMS, ICR_TXDW | ICR_RXO | ICR_RXT0);
}
//#define E1000_WRITE_FLUSH() erd(WMREG_STATUS)
int
e1000attach(struct pci_func *pcif)
{
int r;
pci_func_enable(pcif);
e1000.membase = pcif->reg_base[0];
e1000.iobase = pcif->reg_base[2];
e1000.pcidevid = pcif->dev_id;
e1000irq = pcif->irq_line;
picenable(e1000irq);
ioapicenable(e1000irq, 0);
e1000reset();
// Get the MAC address
u16 myaddr[3];
r = eeprom_read(&myaddr[0], EEPROM_OFF_MACADDR, 3);
if (r < 0)
return 0;
u8 *addr = (u8*) &myaddr[0];
cprintf("%x:%x:%x:%x:%x:%x\n",
addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
return 0;
}
(Diff collapsed.)
@@ -123,12 +123,8 @@ gc(void)
void
-gc_worker(void)
+gc_worker(void *x)
{
-  release(&myproc()->lock); // initially held by scheduler
-  mtstart(rcu_gc_worker, myproc());
struct spinlock wl;
initlock(&wl, "rcu_gc_worker"); // dummy lock
......
@@ -3,6 +3,7 @@
#include "types.h"
#include "amd64.h"
#include "kernel.h"
#define IO_TIMER1 0x040 // 8253 Timer #1
#define TIMER_FREQ 1193182
@@ -25,6 +26,13 @@ microdelay(u64 delay)
nop_pause();
}
u64
nsectime(void)
{
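  // ticks advances once per QUANTUM msec (see cv_tick), so
  // ticks*QUANTUM is elapsed milliseconds; scale to nanoseconds.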
u64 msec = ticks*QUANTUM;
return msec*1000000;
}
void
inithz(void)
{
......
@@ -57,9 +57,14 @@ void tree_test(void);
void cgaputc(int c);
// condvar.c
extern u64 ticks;
extern struct spinlock tickslock;
extern struct condvar cv_ticks;
void initcondvar(struct condvar *, char *);
void cv_sleep(struct condvar *cv, struct spinlock*);
void cv_sleepto(struct condvar *cv, struct spinlock*, u64);
void cv_wakeup(struct condvar *cv);
void cv_tick(void);
// console.c
void cprintf(const char*, ...);
@@ -67,6 +72,7 @@ void panic(const char*) __attribute__((noreturn));
void snprintf(char *buf, u32 n, char *fmt, ...);
void consoleintr(int(*)(void));
// crange.c
struct clist_range {
@@ -89,6 +95,11 @@ struct clist_range* crange_search(struct crange *cr, u64 k);
int crange_foreach(struct crange *crk, int (*f)(struct clist_range *r, void *st), void *st);
void crange_print(struct crange *cr, int);
// e1000.c
extern int e1000irq;
void e1000intr(void);
int e1000tx(void *buf, u32 len);
// exec.c
int exec(char*, char**);
@@ -127,8 +138,7 @@ void gc_begin_epoch();
void gc_end_epoch();
void gc_delayed(void*, void (*dofree)(void*));
void gc_delayed2(int, u64, void (*dofree)(int, u64));
-void gc_start(void);
-void gc_worker(void);
+void gc_start(void);
// hwvm.c
void freevm(pml4e_t*);
@@ -137,6 +147,7 @@ pme_t * walkpgdir(pml4e_t*, const void*, int);
// hz.c
void microdelay(u64);
u64 nsectime(void);
// ide.c
void ideinit(void);
@@ -172,6 +183,11 @@ void lapicpc(char mask);
extern int ncpu;
int mpbcpu(void);
// net.c
void netfree(void *va);
void* netalloc(void);
void netrx(void *va, u16 len);
// ns.c
enum {
nskey_int = 1,
@@ -242,6 +258,7 @@ void userinit(void);
int wait(void);
void yield(void);
void migrate(struct proc *);
struct proc* threadalloc(void (*fn)(void*), void *arg);
// prof.c
extern int profenable;
@@ -260,6 +277,7 @@ void sampconf(void);
// spinlock.c
void acquire(struct spinlock*);
int tryacquire(struct spinlock*);
void getcallerpcs(void*, uptr*);
int holding(struct spinlock*);
void initlock(struct spinlock*, char*);
@@ -291,9 +309,6 @@ void swtch(struct context**, struct context*);
// trap.c
extern struct segdesc bootgdt[NSEGS];
-extern u64 ticks;
-extern struct spinlock tickslock;
-extern struct condvar cv_ticks;
// uart.c
void uartputc(char c);
......
@@ -91,9 +91,9 @@ initlapic(void)
lapichz = 100 * (ccr0 - ccr1);
}
-  count = (QUANTUN*lapichz) / 1000;
+  count = (QUANTUM*lapichz) / 1000;
  if (count > 0xffffffff)
-    panic("initlapic: QUANTUN too large");
+    panic("initlapic: QUANTUM too large");
// The timer repeatedly counts down at bus frequency
// from lapic[TICR] and then issues an interrupt.
......
@@ -25,6 +25,7 @@ extern void inituser(void);
extern void inithz(void);
extern void initwq(void);
extern void initsamp(void);
extern void initpci(void);
static volatile int bstate;
@@ -102,6 +103,7 @@ cmain(u64 mbmagic, u64 mbaddr)
initwq(); // work queues
#endif
initsamp();
initpci();
cprintf("ncpu %d %lu MHz\n", ncpu, cpuhz / 1000000);
......
#include "types.h"
#include "kernel.h"
#ifdef LWIP
#include "lwip/tcpip.h"
#endif
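// A canned ICMP echo request: Ethernet header (dst 52:55:0a:00:02:02,
// qemu's user-mode gateway; src 52:54:00:12:34:56, qemu's default NIC
// MAC), an IPv4 header from 10.0.2.15 to 10.0.2.2, then the ICMP echo
// payload.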
static u8 ping_packet[] = {
0x52, 0x55, 0x0a, 0x00, 0x02, 0x02, 0x52, 0x54, 0x00, 0x12,
0x34, 0x56, 0x08, 0x00, 0x45, 0x00, 0x00, 0x54, 0x00, 0x00,
0x40, 0x00, 0x40, 0x01, 0x22, 0x99, 0x0a, 0x00, 0x02, 0x0f,
0x0a, 0x00, 0x02, 0x02, 0x08, 0x00, 0x94, 0xa0, 0x03, 0x07,
0x00, 0x01, 0x71, 0xfc, 0xec, 0x4e, 0x00, 0x00, 0x00, 0x00,
0x3b, 0x39, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37
};
void
netfree(void *va)
{
kfree(va);
}
void *
netalloc(void)
{
return kalloc();
}
void
netrx(void *va, u16 len)
{
cprintf("netrx %lx len %x\n", va, len);
}
void
nettest(void)
{
void *ping;
u32 len;
ping = netalloc();
len = sizeof(ping_packet);
memmove(ping, ping_packet, len);
e1000tx(ping, len);
//e1000tx(ping_packet, sizeof(ping_packet));
}
#ifdef LWIP
int errno;
static void
tcpip_init_done(void *arg)
{
cprintf("tcpip_init_done: %lx\n", arg);
}
void
initnet(void)
{
tcpip_init(&tcpip_init_done, NULL);
cprintf("initnet:\n");
}
#else
void
initnet(void)
{
}
#endif
HAVE_LWIP = $(shell (cd lwip 2> /dev/null && echo y) || echo n)
ifeq ($(HAVE_LWIP),y)
-include $(O)/lwip/src/api/*.d
-include $(O)/lwip/src/netif/*.d
-include $(O)/lwip/src/core/*.d
-include $(O)/net/*.d
OBJS += $(O)/liblwip.a
CFLAGS += -Ilwip/src/include -Inet -Ilwip/src/include/ipv4 -I. -DLWIP
LWIP_CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb \
-m64 -Werror -std=c99 -fms-extensions -mno-sse -mcmodel=large -I$(QEMUSRC) \
-fno-omit-frame-pointer -DHW_$(HW) -include param.h \
-Wno-attributes -Wno-address -Wno-char-subscripts
LWIP_CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
LWIP_INCLUDES := \
-Ilwip/src/include \
-Ilwip/src/include/ipv4 \
-Inet \
-Inet/arch \
-I.
LWIP_SRCFILES += \
lwip/src/api/api_lib.c \
lwip/src/api/api_msg.c \
lwip/src/api/err.c \
lwip/src/api/sockets.c \
lwip/src/api/tcpip.c \
lwip/src/api/netbuf.c \
lwip/src/core/init.c \
lwip/src/core/tcp_in.c \
lwip/src/core/dhcp.c \
lwip/src/core/def.c \
lwip/src/core/mem.c \
lwip/src/core/memp.c \
lwip/src/core/netif.c \
lwip/src/core/pbuf.c \
lwip/src/core/raw.c \
lwip/src/core/stats.c \
lwip/src/core/sys.c \
lwip/src/core/tcp.c \
lwip/src/core/timers.c \
lwip/src/core/ipv4/ip_addr.c \
lwip/src/core/ipv4/icmp.c \
lwip/src/core/ipv4/ip.c \
lwip/src/core/ipv4/ip_frag.c \
lwip/src/core/ipv4/inet_chksum.c \
lwip/src/core/ipv4/inet.c \
lwip/src/core/tcp_out.c \
lwip/src/core/udp.c \
lwip/src/netif/etharp.c \
net/sys_arch.c
LWIP_OBJFILES := $(patsubst %.c, $(O)/%.o, $(LWIP_SRCFILES))
LWIP_OBJFILES := $(patsubst %.S, $(O)/%.o, $(LWIP_OBJFILES))
$(O)/net/%.o: net/%.c
@echo " CC $@"
$(Q)mkdir -p $(@D)
$(Q)$(CC) $(LWIP_CFLAGS) $(LWIP_INCLUDES) -c -o $@ $<
$(O)/lwip/src/%.o: lwip/src/%.c
@echo " CC $@"
$(Q)mkdir -p $(@D)
$(Q)$(CC) $(LWIP_CFLAGS) $(LWIP_INCLUDES) -c -o $@ $<
$(O)/liblwip.a: $(LWIP_OBJFILES)
@echo " AR $@"
$(Q)mkdir -p $(@D)
$(Q)$(AR) r $@ $(LWIP_OBJFILES)
endif
#ifndef LWIP_ARCH_CC_H
#define LWIP_ARCH_CC_H
#include <types.h>
void panic(const char*) __attribute__((noreturn));
void cprintf(const char*, ...);
typedef u32 u32_t;
typedef s32 s32_t;
typedef u64 u64_t;
typedef s64 s64_t;
typedef u16 u16_t;
typedef s16 s16_t;
typedef u8 u8_t;
typedef s8 s8_t;
typedef uptr mem_ptr_t;
#define PACK_STRUCT_FIELD(x) x __attribute__((packed))
#define PACK_STRUCT_STRUCT __attribute__((packed))
#define PACK_STRUCT_BEGIN
#define PACK_STRUCT_END
#define S16_F "d"
#define U16_F "u"
#define X16_F "x"
#define S32_F "d"
#define U32_F "u"
#define X32_F "x"
#define LWIP_PLATFORM_DIAG(x) cprintf x
#define LWIP_PLATFORM_ASSERT(x) panic(x)
#ifndef BYTE_ORDER
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#endif
#ifndef LWIP_ARCH_PERF_H
#define LWIP_ARCH_PERF_H
#define PERF_START
#define PERF_STOP(x)
#endif
#ifndef LWIP_ARCH_SYS_ARCH_H
#define LWIP_ARCH_SYS_ARCH_H
#include "spinlock.h"
#include "condvar.h"
typedef int sys_thread_t;
typedef struct sys_mbox {
#define MBOXSLOTS 32
void *msg[MBOXSLOTS];
} sys_mbox_t;
typedef struct sys_sem {
struct spinlock s;
struct condvar c;
u8 count;
} sys_sem_t;
typedef struct sys_mutex {
struct spinlock s;
} sys_mutex_t;
#define SYS_MBOX_NULL (-1)
#define SYS_SEM_NULL (-1)
void lwip_core_lock(void);
void lwip_core_unlock(void);
void lwip_core_init(void);
#define SYS_ARCH_DECL_PROTECT(lev)
#define SYS_ARCH_PROTECT(lev)
#define SYS_ARCH_UNPROTECT(lev)
#define SYS_ARCH_NOWAIT 0xfffffffe
#endif
#ifndef XV6_LWIPOPTS_H
#define XV6_LWIPOPTS_H
#define LWIP_STATS 0
#define LWIP_STATS_DISPLAY 0
#define LWIP_DHCP 1
#define LWIP_COMPAT_SOCKETS 0
//#define SYS_LIGHTWEIGHT_PROT 1
#define LWIP_PROVIDE_ERRNO 1
#define MEM_ALIGNMENT 4
#define MEMP_NUM_PBUF 64
#define MEMP_NUM_UDP_PCB 8
#define MEMP_NUM_TCP_PCB 32
#define MEMP_NUM_TCP_PCB_LISTEN 16
#define MEMP_NUM_TCP_SEG TCP_SND_QUEUELEN // at least as big as TCP_SND_QUEUELEN
#define MEMP_NUM_NETBUF 128
#define MEMP_NUM_NETCONN 32
#define MEMP_NUM_SYS_TIMEOUT 6
#define PER_TCP_PCB_BUFFER (16 * 4096)
#define MEM_SIZE (PER_TCP_PCB_BUFFER*MEMP_NUM_TCP_SEG + 4096*MEMP_NUM_TCP_SEG)
#define PBUF_POOL_SIZE 512
#define PBUF_POOL_BUFSIZE 2000
#define TCP_MSS 1460
#define TCP_WND 24000
#define TCP_SND_BUF (16 * TCP_MSS)
// lwip prints a warning if TCP_SND_QUEUELEN < (2 * TCP_SND_BUF/TCP_MSS),
// but 16 is faster.
#define TCP_SND_QUEUELEN (2 * TCP_SND_BUF/TCP_MSS)
//#define TCP_SND_QUEUELEN 16
// Print error messages when we run out of memory
#define LWIP_DEBUG 1
//#define TCP_DEBUG LWIP_DBG_ON
//#define MEMP_DEBUG LWIP_DBG_ON
//#define SOCKETS_DEBUG LWIP_DBG_ON
//#define DBG_TYPES_ON LWIP_DBG_ON
//#define PBUF_DEBUG LWIP_DBG_ON
//#define API_LIB_DEBUG LWIP_DBG_ON
#define DBG_MIN_LEVEL DBG_LEVEL_SERIOUS
#define LWIP_DBG_MIN_LEVEL 0
#define MEMP_SANITY_CHECK 0
#endif
void* memset(void*, int, unsigned int);
void* memcpy(void *dst, const void *src, unsigned int n);
unsigned int strlen(const char* str);
#include "lwip/sys.h"
#include "arch/sys_arch.h"
#include "kernel.h"
#define DIE panic(__func__)
//
// mbox
//
err_t
sys_mbox_new(sys_mbox_t *mbox, int size)
{
if (size > MBOXSLOTS) {
cprintf("sys_mbox_new: size %u\n", size);
return ERR_MEM;
}
return ERR_OK;
}
void
sys_mbox_set_invalid(sys_mbox_t *mbox)
{
DIE;
}
err_t
sys_mbox_trypost(sys_mbox_t *mbox, void *msg)
{
DIE;
}
int
sys_mbox_valid(sys_mbox_t *mbox)
{
DIE;
}
void
sys_mbox_post(sys_mbox_t *mbox, void *msg)
{
DIE;
}
void
sys_mbox_free(sys_mbox_t *mbox)
{
DIE;
}
u32_t
sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout)
{
DIE;
}
u32_t
sys_arch_mbox_tryfetch(sys_mbox_t *mbox, void **msg)
{
DIE;
}
//
// sem
//
err_t
sys_sem_new(sys_sem_t *sem, u8_t count)
{
initlock(&sem->s, "lwIP sem");
initcondvar(&sem->c, "lwIP condvar");
sem->count = count;
return ERR_OK;
}
void
sys_sem_free(sys_sem_t *sem)
{
DIE;
}
void
sys_sem_set_invalid(sys_sem_t *sem)
{
DIE;
}
int
sys_sem_valid(sys_sem_t *sem)
{
DIE;
}
void
sys_sem_signal(sys_sem_t *sem)
{
DIE;
}
u32_t
sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
{
DIE;
}
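Most of these stubs simply DIE for now. For a sense of where the new condvar timeout support is headed, here is a rough sketch (hypothetical, not in this commit) of a working sys_arch_sem_wait, assuming lwIP's usual convention that the return value is the msec spent waiting, or SYS_ARCH_TIMEOUT on timeout:

// Hypothetical sketch, not part of this commit. timeout is in msec;
// 0 means wait forever. cv_sleepto() takes an absolute deadline in
// nsec compared against nsectime().
u32_t
sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
{
  u64 start = nsectime();
  u64 until = timeout ? start + (u64)timeout*1000000 : 0;

  acquire(&sem->s);
  while (sem->count == 0) {
    if (until && nsectime() >= until) {
      release(&sem->s);
      return SYS_ARCH_TIMEOUT;
    }
    cv_sleepto(&sem->c, &sem->s, until);
  }
  sem->count--;
  release(&sem->s);
  return (u32_t)((nsectime() - start) / 1000000);
}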
//
// mutex
//
err_t
sys_mutex_new(sys_mutex_t *mutex)
{
initlock(&mutex->s, "lwIP mutex");
return ERR_OK;
}
void
sys_mutex_lock(sys_mutex_t *mutex)
{
DIE;
}
void
sys_mutex_unlock(sys_mutex_t *mutex)
{
DIE;
}
void
sys_mutex_free(sys_mutex_t *mutex)
{
DIE;
}
//
// thread
//
sys_thread_t
sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
int stacksize, int prio)
{
DIE;
}
//
// init
//
void
sys_init(void)
{
}
@@ -12,7 +12,7 @@
#define INF (~0UL)
#define CACHELINE 64 // cache line size
#define CPUKSTACKS (NPROC + NCPU)
-#define QUANTUN 10 // scheduling time quantum and tick length (in msec)
+#define QUANTUM 10 // scheduling time quantum and tick length (in msec)
#define WQSHIFT 4 // 2^WQSHIFT work queue slots
#define VICTIMAGE 1000000 // cycles a proc executes before becoming an eligible victim
#if defined(HW_josmp)
......
#include "types.h"
#include "amd64.h"
#include "kernel.h"
#include "pci.h"
#include "pcireg.h"
extern int e1000attach(struct pci_func *pcif);
// Flag to do "lspci" at bootup
static int pci_show_devs = 1;
static int pci_show_addrs = 1;
// PCI "configuration mechanism one"
static u32 pci_conf1_addr_ioport = 0x0cf8;
static u32 pci_conf1_data_ioport = 0x0cfc;
// PCI driver table
struct pci_driver {
u32 key1, key2;
int (*attachfn) (struct pci_func *pcif);
};
// Forward declarations
static int pci_bridge_attach(struct pci_func *pcif);
// pci_attach_class matches the class and subclass of a PCI device
struct pci_driver pci_attach_class[] = {
{ PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_PCI, &pci_bridge_attach },
{ 0, 0, 0 },
};
// pci_attach_vendor matches the vendor ID and device ID of a PCI device
// http://www.intel.com/products/ethernet/resource.htm?wapkw=software%20manual%20pcie#s1=Gigabit%20Ethernet&s2=all&s3=Manual
struct pci_driver pci_attach_vendor[] = {
// [E1000 5.2]
// QEMU emulates an 82540EM, specifically.
{ 0x8086, 0x100e, &e1000attach },
// Both of ud0's e1000e (82573E, 82573L)
{ 0x8086, 0x108c, &e1000attach },
{ 0x8086, 0x109A, &e1000attach },
{ 0, 0, 0 },
};
static const char *pci_class[] =
{
[0x0] = "Unknown",
[0x1] = "Storage controller",
[0x2] = "Network controller",
[0x3] = "Display controller",
[0x4] = "Multimedia device",
[0x5] = "Memory controller",
[0x6] = "Bridge device",
};
static void
pci_print_func(struct pci_func *f)
{
const char *class = pci_class[0];
if (PCI_CLASS(f->dev_class) < sizeof(pci_class) / sizeof(pci_class[0]))
class = pci_class[PCI_CLASS(f->dev_class)];
cprintf("PCI: %x:%x.%d: %x:%x: class: %x.%x (%s) irq: %d\n",
f->bus->busno, f->dev, f->func,
PCI_VENDOR(f->dev_id), PCI_PRODUCT(f->dev_id),
PCI_CLASS(f->dev_class), PCI_SUBCLASS(f->dev_class), class,
f->irq_line);
}
static void
pci_conf1_set_addr(u32 bus,
u32 dev,
u32 func,
u32 offset)
{
if (!(bus < 256 &&
dev < 32 &&
func < 8 &&
offset < 256 &&
(offset&0x3) == 0))
panic("pci_conf1_set_addr");
u32 v = (1 << 31) | // config-space
(bus << 16) | (dev << 11) | (func << 8) | (offset);
outl(pci_conf1_addr_ioport, v);
}
static u32
pci_conf_read(struct pci_func *f, u32 off)
{
pci_conf1_set_addr(f->bus->busno, f->dev, f->func, off);
return inl(pci_conf1_data_ioport);
}
static void
pci_conf_write(struct pci_func *f, u32 off, u32 v)
{
pci_conf1_set_addr(f->bus->busno, f->dev, f->func, off);
outl(pci_conf1_data_ioport, v);
}
static int __attribute__((warn_unused_result))
pci_attach_match(u32 key1, u32 key2,
struct pci_driver *list, struct pci_func *pcif)
{
u32 i;
for (i = 0; list[i].attachfn; i++) {
if (list[i].key1 == key1 && list[i].key2 == key2) {
int r = list[i].attachfn(pcif);
if (r > 0)
return r;
if (r < 0)
cprintf("pci_attach_match: attaching "
"%x.%x (%p): e\n",
key1, key2, list[i].attachfn, r);
}
}
return 0;
}
static int
pci_attach(struct pci_func *f)
{
return
pci_attach_match(PCI_CLASS(f->dev_class),
PCI_SUBCLASS(f->dev_class),
&pci_attach_class[0], f) ||
pci_attach_match(PCI_VENDOR(f->dev_id),
PCI_PRODUCT(f->dev_id),
&pci_attach_vendor[0], f);
}
static int
pci_scan_bus(struct pci_bus *bus)
{
int totaldev = 0;
struct pci_func df;
memset(&df, 0, sizeof(df));
df.bus = bus;
for (df.dev = 0; df.dev < 32; df.dev++) {
u32 bhlc = pci_conf_read(&df, PCI_BHLC_REG);
if (PCI_HDRTYPE_TYPE(bhlc) > 1) // Unsupported or no device
continue;
totaldev++;
struct pci_func f = df;
for (f.func = 0; f.func < (PCI_HDRTYPE_MULTIFN(bhlc) ? 8 : 1);
f.func++) {
struct pci_func af = f;
af.dev_id = pci_conf_read(&f, PCI_ID_REG);
if (PCI_VENDOR(af.dev_id) == 0xffff)
continue;
u32 intr = pci_conf_read(&af, PCI_INTERRUPT_REG);
af.irq_line = PCI_INTERRUPT_LINE(intr);
af.dev_class = pci_conf_read(&af, PCI_CLASS_REG);
if (pci_show_devs)
pci_print_func(&af);
pci_attach(&af);
}
}
return totaldev;
}
static int
pci_bridge_attach(struct pci_func *pcif)
{
u32 ioreg = pci_conf_read(pcif, PCI_BRIDGE_STATIO_REG);
u32 busreg = pci_conf_read(pcif, PCI_BRIDGE_BUS_REG);
if (PCI_BRIDGE_IO_32BITS(ioreg)) {
cprintf("PCI: %x:%x.%d: 32-bit bridge IO not supported.\n",
pcif->bus->busno, pcif->dev, pcif->func);
return 0;
}
struct pci_bus nbus;
memset(&nbus, 0, sizeof(nbus));
nbus.parent_bridge = pcif;
nbus.busno = (busreg >> PCI_BRIDGE_BUS_SECONDARY_SHIFT) & 0xff;
if (pci_show_devs)
cprintf("PCI: %x:%x.%d: bridge to PCI bus %d--%d\n",
pcif->bus->busno, pcif->dev, pcif->func,
nbus.busno,
(busreg >> PCI_BRIDGE_BUS_SUBORDINATE_SHIFT) & 0xff);
pci_scan_bus(&nbus);
return 1;
}
void
pci_func_enable(struct pci_func *f)
{
pci_conf_write(f, PCI_COMMAND_STATUS_REG,
PCI_COMMAND_IO_ENABLE |
PCI_COMMAND_MEM_ENABLE |
PCI_COMMAND_MASTER_ENABLE);
u32 bar_width;
u32 bar;
for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END;
bar += bar_width)
{
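    // Size probe: save the BAR, write all 1s, and read it back. The
    // device hard-wires the low address bits to zero, so the read-back
    // value encodes the region size; the saved value is restored below.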
u32 oldv = pci_conf_read(f, bar);
bar_width = 4;
pci_conf_write(f, bar, 0xffffffff);
u32 rv = pci_conf_read(f, bar);
if (rv == 0)
continue;
int regnum = PCI_MAPREG_NUM(bar);
u32 base, size;
if (PCI_MAPREG_TYPE(rv) == PCI_MAPREG_TYPE_MEM) {
if (PCI_MAPREG_MEM_TYPE(rv) == PCI_MAPREG_MEM_TYPE_64BIT)
bar_width = 8;
size = PCI_MAPREG_MEM_SIZE(rv);
base = PCI_MAPREG_MEM_ADDR(oldv);
if (pci_show_addrs)
cprintf(" mem region %d: %d bytes at 0x%x\n",
regnum, size, base);
} else {
size = PCI_MAPREG_IO_SIZE(rv);
base = PCI_MAPREG_IO_ADDR(oldv);
if (pci_show_addrs)
cprintf(" io region %d: %d bytes at 0x%x\n",
regnum, size, base);
}
pci_conf_write(f, bar, oldv);
f->reg_base[regnum] = base;
f->reg_size[regnum] = size;
if (size && !base)
cprintf("PCI device %02x:%02x.%d (%04x:%04x) "
"may be misconfigured: "
"region %d: base 0x%x, size %d\n",
f->bus->busno, f->dev, f->func,
PCI_VENDOR(f->dev_id), PCI_PRODUCT(f->dev_id),
regnum, base, size);
}
cprintf("PCI function %x:%x.%d (%x:%x) enabled\n",
f->bus->busno, f->dev, f->func,
PCI_VENDOR(f->dev_id), PCI_PRODUCT(f->dev_id));
}
void
initpci(void)
{
static struct pci_bus root_bus;
memset(&root_bus, 0, sizeof(root_bus));
pci_scan_bus(&root_bus);
}
// PCI subsystem interface
enum { pci_res_bus, pci_res_mem, pci_res_io, pci_res_max };
struct pci_bus;
struct pci_func {
struct pci_bus *bus; // Primary bus for bridges
u32 dev;
u32 func;
u32 dev_id;
u32 dev_class;
u32 reg_base[6];
u32 reg_size[6];
u8 irq_line;
};
struct pci_bus {
struct pci_func *parent_bridge;
u32 busno;
};
void initpci(void);
void pci_func_enable(struct pci_func *f);
(Diff collapsed.)
@@ -12,6 +12,8 @@
#include "vm.h"
#include "sched.h"
extern void threadstub(void);
int __mpalign__ idle[NCPU];
struct ns *nspid __mpalign__;
static struct proc *bootproc __mpalign__;
@@ -267,17 +269,19 @@ inituser(void)
release(&p->lock);
for (u32 c = 0; c < NCPU; c++) {
-    struct proc *rcup = allocproc();
-    rcup->vmap = vmap_alloc();
-    rcup->context->rip = (u64) gc_worker;
-    rcup->cwd = 0;
-    rcup->cpuid = c;
-    rcup->cpu_pin = 1;
-    acquire(&rcup->lock);
-    rcup->state = RUNNABLE;
-    addrun(rcup);
-    release(&rcup->lock);
+    extern void gc_worker(void*);
+    struct proc *gcp;
+
+    gcp = threadalloc(gc_worker, NULL);
+    if (gcp == NULL)
+      panic("threadalloc: gc_worker");
+
+    gcp->cpuid = c;
+    gcp->cpu_pin = 1;
+    acquire(&gcp->lock);
+    gcp->state = RUNNABLE;
+    addrun(gcp);
+    release(&gcp->lock);
}
}
@@ -352,7 +356,7 @@ scheduler(void)
mtpause(schedp);
if (p->context->rip != (uptr)forkret &&
-      p->context->rip != (uptr)gc_worker)
+      p->context->rip != (uptr)threadstub)
{
mtresume(p);
}
@@ -663,3 +667,34 @@ wait(void)
release(&myproc()->lock);
}
}
void
threadhelper(void (*fn)(void *), void *arg)
{
release(&myproc()->lock); // initially held by scheduler
mtstart(fn, myproc());
fn(arg);
exit();
}
struct proc*
threadalloc(void (*fn)(void *), void *arg)
{
struct proc *p;
p = allocproc();
if (p == NULL)
return NULL;
p->vmap = vmap_alloc();
if (p->vmap == NULL) {
gc_delayed(p, kmfree);
return NULL;
}
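  // Park fn and arg in callee-saved registers; when the scheduler
  // swtch()es to this context, threadstub (swtch.S) moves them into
  // %rdi/%rsi and tail-calls threadhelper(fn, arg).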
p->context->rip = (u64)threadstub;
p->context->r12 = (u64)fn;
p->context->r13 = (u64)arg;
p->cwd = 0;
return p;
}
@@ -42,8 +42,6 @@ struct proc {
struct proc *parent; // Parent process
struct trapframe *tf; // Trap frame for current syscall
struct context *context; // swtch() here to run process
-  struct condvar *oncv; // Where it is sleeping, for kill()
-  struct proc *cv_next; // Linked list of processes waiting for oncv
int killed; // If non-zero, have been killed
struct file *ofile[NOFILE]; // Open files
struct inode *cwd; // Current directory
@@ -66,6 +64,11 @@ struct proc {
struct runq *runq;
struct wqframe wqframe;
STAILQ_ENTRY(proc) runqlink;
struct condvar *oncv; // Where it is sleeping, for kill()
u64 cv_wakeup; // Wakeup time for this process
LIST_ENTRY(proc) cv_waiters; // Linked list of processes waiting for oncv
LIST_ENTRY(proc) cv_sleep; // Linked list of processes sleeping on a cv
};
extern struct ns *nspid;
@@ -18,6 +18,33 @@ initlock(struct spinlock *lk, char *name)
lk->locked = 0;
}
int
tryacquire(struct spinlock *lk)
{
pushcli(); // disable interrupts to avoid deadlock.
#if SPINLOCK_DEBUG
if(holding(lk)) {
cprintf("%lx\n", __builtin_return_address(0));
panic("acquire");
}
#endif
mtlock(lk);
if (xchg32(&lk->locked, 1) != 0) {
popcli();
return 0;
}
mtacquired(lk);
#if SPINLOCK_DEBUG
// Record info about lock acquisition for debugging.
lk->cpu = mycpu();
getcallerpcs(&lk, lk->pcs);
#endif
return 1;
}
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
......
@@ -24,3 +24,9 @@ swtch:
popq %rbp
popq %rbx
ret
.globl threadstub
threadstub:
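  # A new kernel thread's context (built by threadalloc) starts here:
  # the thread function is parked in %r12 and its argument in %r13.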
movq %r12, %rdi
movq %r13, %rsi
jmp threadhelper
@@ -11,11 +11,6 @@
#include "kmtrace.h"
#include "bits.h"
-u64 ticks __mpalign__;
-struct spinlock tickslock __mpalign__;
-struct condvar cv_ticks __mpalign__;
struct segdesc __attribute__((aligned(16))) bootgdt[NSEGS] = {
// null
[0]=SEGDESC(0, 0, 0),
@@ -76,12 +71,8 @@ trap(struct trapframe *tf)
switch(tf->trapno){
case T_IRQ0 + IRQ_TIMER:
-    if(mycpu()->id == 0){
-      acquire(&tickslock);
-      ticks++;
-      cv_wakeup(&cv_ticks);
-      release(&tickslock);
-    }
+    if (mycpu()->id == 0)
+      cv_tick();
lapiceoi();
break;
case T_IRQ0 + IRQ_IDE:
@@ -117,6 +108,13 @@
sampconf();
break;
default:
if (tf->trapno == T_IRQ0+e1000irq) {
e1000intr();
lapiceoi();
piceoi();
break;
}
if(myproc() == 0 || (tf->cs&3) == 0){
// In kernel, it must be our mistake.
cprintf("unexpected trap %d from cpu %d rip %lx (cr2=0x%lx)\n",
......
@@ -6,6 +6,7 @@ typedef unsigned char uint8;
typedef uint64 uintptr;
typedef uint8 u8;
typedef char s8;
typedef uint16 u16;
typedef short s16;
typedef uint32 u32;
......