提交 e1badec2 · 作者:Nickolai Zeldovich

more atomic template use

上级 f1490815
......@@ -24,9 +24,9 @@ struct inode : public rcu_freed {
u32 dev; // Device number
u32 inum; // Inode number
u32 gen; // Generation number
int ref; // Reference count
atomic<int> ref; // Reference count
int flags; // I_BUSY, I_VALID
int readbusy;
atomic<int> readbusy;
struct condvar cv;
struct spinlock lock;
char lockname[16];
......
......@@ -249,10 +249,10 @@ iget(u32 dev, u32 inum)
ip = ins->lookup(mkpair(dev, inum));
if (ip) {
// tricky: first bump ref, then check free flag
__sync_fetch_and_add(&ip->ref, 1);
ip->ref++;
if (ip->flags & I_FREE) {
gc_end_epoch();
__sync_sub_and_fetch(&ip->ref, 1);
ip->ref--;
goto retry;
}
gc_end_epoch();
......@@ -339,7 +339,7 @@ iget(u32 dev, u32 inum)
struct inode*
idup(struct inode *ip)
{
__sync_fetch_and_add(&ip->ref, 1);
ip->ref++;
return ip;
}
......@@ -357,7 +357,7 @@ ilock(struct inode *ip, int writer)
while(ip->flags & (I_BUSYW | (writer ? I_BUSYR : 0)))
cv_sleep(&ip->cv, &ip->lock);
ip->flags |= I_BUSYR | (writer ? I_BUSYW : 0);
__sync_fetch_and_add(&ip->readbusy, 1);
ip->readbusy++;
release(&ip->lock);
if((ip->flags & I_VALID) == 0)
......@@ -372,7 +372,7 @@ iunlock(struct inode *ip)
panic("iunlock");
acquire(&ip->lock);
int lastreader = __sync_sub_and_fetch(&ip->readbusy, 1);
int lastreader = (--ip->readbusy);
ip->flags &= ~(I_BUSYW | ((lastreader==0) ? I_BUSYR : 0));
cv_wakeup(&ip->cv);
release(&ip->lock);
......@@ -382,7 +382,7 @@ iunlock(struct inode *ip)
void
iput(struct inode *ip)
{
if(__sync_sub_and_fetch(&ip->ref, 1) == 0) {
if(--ip->ref == 0) {
if (ip->nlink)
return;
acquire(&ip->lock);
......@@ -407,7 +407,7 @@ iput(struct inode *ip)
}
ip->flags |= (I_BUSYR | I_BUSYW);
__sync_fetch_and_add(&ip->readbusy, 1);
ip->readbusy++;
release(&ip->lock);
......
......@@ -11,6 +11,8 @@ extern "C" {
}
#include "ns.hh"
#include "atomic.hh"
extern u64 proc_hash(const u32&);
extern xns<u32, proc*, proc_hash> *xnspid;
......@@ -41,7 +43,7 @@ static struct gc_state {
struct condvar cv;
headinfo delayed[NEPOCH];
headinfo tofree[NEPOCH];
int ndelayed;
atomic<int> ndelayed;
int min_epoch;
int nrun;
int nfree;
......@@ -156,13 +158,13 @@ gc_delayfreelist(void)
void
gc_delayed(rcu_freed *e)
{
__sync_fetch_and_add(&gc_state[mycpu()->id].ndelayed, 1);
gc_state[mycpu()->id].ndelayed++;
pushcli();
int c = mycpu()->id;
u64 myepoch = myproc()->epoch;
u64 minepoch = gc_state[c].delayed[myepoch % NEPOCH].epoch;
if (gc_debug)
cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid, global_epoch, gc_state[c].ndelayed);
cprintf("(%d, %d): gc_delayed: %lu ndelayed %d\n", c, myproc()->pid, global_epoch, gc_state[c].ndelayed.load());
if (myepoch != minepoch) {
cprintf("%d: myepoch %lu minepoch %lu\n", myproc()->pid, myepoch, minepoch);
panic("gc_delayed_int");
......@@ -226,7 +228,7 @@ gc_worker(void *x)
for (i = gc_state[mycpu()->id].min_epoch; i < global-2; i++) {
int nfree = gc_free_tofreelist(&(gc_state[mycpu()->id].tofree[i%NEPOCH].head), i);
gc_state[mycpu()->id].tofree[i%NEPOCH].epoch += NEPOCH;
__sync_fetch_and_sub(&gc_state[mycpu()->id].ndelayed, nfree);
gc_state[mycpu()->id].ndelayed -= nfree;
if (0 && nfree > 0) {
cprintf("%d: epoch %lu freed %d\n", mycpu()->id, i, nfree);
}
......
......@@ -69,7 +69,7 @@ sys_kernlet(int fd, size_t count, off_t off)
if(f->type != file::FD_INODE)
return -1;
fetchadd(&f->ip->ref, 1);
f->ip->ref++;
w = pread_allocwork(f->ip, myproc()->vmap->kshared, count, off);
if (w == NULL) {
iput(f->ip);
......
......@@ -216,7 +216,7 @@ pagefault(struct vmap *vmap, uptr va, u32 err)
if (vm_debug)
cprintf("pagefault: err 0x%x va 0x%lx type %d ref %lu pid %d\n",
err, va, m->va_type, (u64) m->n->ref, myproc()->pid);
err, va, m->va_type, m->n->ref.load(), myproc()->pid);
if (m->va_type == COW && (err & FEC_WR)) {
if (pagefault_wcow(vmap, va, pte, m, npg) < 0) {
......
您将 0 人添加到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论