Introduce and use sleeplocks instead of BUSY flags

Remove I_BUSY, B_BUSY, and intrans defs and usages
One spinlock per buf to avoid ugly loop in bget
Fix race in filewrite (don't update f->off after releasing the inode lock)
commit 3a5fa7ed90 (parent 8a9b6dbd44)
Frans Kaashoek, 2011-08-26 10:08:29 -04:00
12 changed files with 106 additions and 63 deletions
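In outline, the commit replaces each hand-rolled busy flag (a while/sleep loop plus wakeup, open-coded at every call site) with a small sleeplock type layered on sleep/wakeup. A minimal sketch of the before/after pattern, using the names this commit introduces (`obj` and `lk` are stand-ins for whichever structure and guarding spinlock are involved):

    // Before: open-coded BUSY flag.
    acquire(&lk);
    while(obj->flags & BUSY)   // someone else owns obj;
      sleep(obj, &lk);         // sleep until they call wakeup(obj)
    obj->flags |= BUSY;
    release(&lk);

    // After: the same logic packaged behind one interface.
    acquire(&lk);
    acquire_sleeplock(&obj->sleeplock, &lk);
    release(&lk);

The per-file diffs below apply this pattern to the buffer cache (B_BUSY), inodes (I_BUSY), and the log (intrans).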

bio.c (41 changed lines)

@@ -13,9 +13,7 @@
 // * Only one process at a time can use a buffer,
 // so do not keep them longer than necessary.
 //
-// The implementation uses three state flags internally:
-// * B_BUSY: the block has been returned from bread
-//     and has not been passed back to brelse.
+// The implementation uses two state flags internally:
 // * B_VALID: the buffer data has been initialized
 //     with the associated disk block contents.
 // * B_DIRTY: the buffer data has been modified

@@ -51,6 +49,8 @@ binit(void)
     b->next = bcache.head.next;
     b->prev = &bcache.head;
     b->dev = -1;
+    initlock(&b->lock, "buf");
+    initsleeplock(&b->sleeplock);
     bcache.head.next->prev = b;
     bcache.head.next = b;
   }

@@ -58,42 +58,43 @@ binit(void)
 // Look through buffer cache for sector on device dev.
 // If not found, allocate fresh block.
-// In either case, return locked buffer.
+// In either case, return sleep-locked buffer.
 static struct buf*
 bget(uint dev, uint sector)
 {
   struct buf *b;
 
   acquire(&bcache.lock);
 
- loop:
   // Try for cached block.
   for(b = bcache.head.next; b != &bcache.head; b = b->next){
+    acquire(&b->lock);
     if(b->dev == dev && b->sector == sector){
-      if(!(b->flags & B_BUSY)){
-        b->flags |= B_BUSY;
-        release(&bcache.lock);
-        return b;
-      }
-      sleep(b, &bcache.lock);
-      goto loop;
+      release(&bcache.lock);
+      acquire_sleeplock(&b->sleeplock, &b->lock);
+      release(&b->lock);
+      return b;
     }
+    release(&b->lock);
   }
 
   // Allocate fresh block.
   for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
-    if((b->flags & B_BUSY) == 0){
+    acquire(&b->lock);
+    if (!acquired_sleeplock(&b->sleeplock)) {
+      release(&bcache.lock);
       b->dev = dev;
       b->sector = sector;
-      b->flags = B_BUSY;
-      release(&bcache.lock);
+      b->flags = 0;
+      acquire_sleeplock(&b->sleeplock, &b->lock);
+      release(&b->lock);
       return b;
     }
+    release(&b->lock);
   }
   panic("bget: no buffers");
 }
 
-// Return a B_BUSY buf with the contents of the indicated disk sector.
+// Return a locked buf with the contents of the indicated disk sector.
 struct buf*
 bread(uint dev, uint sector)
 {

@@ -109,7 +110,7 @@ bread(uint dev, uint sector)
 void
 bwrite(struct buf *b)
 {
-  if((b->flags & B_BUSY) == 0)
+  if(!acquired_sleeplock(&b->sleeplock))
     panic("bwrite");
   b->flags |= B_DIRTY;
   iderw(b);

@@ -119,11 +120,11 @@ bwrite(struct buf *b)
 void
 brelse(struct buf *b)
 {
-  if((b->flags & B_BUSY) == 0)
+  if(!acquired_sleeplock(&b->sleeplock))
     panic("brelse");
 
   acquire(&bcache.lock);
+  acquire(&b->lock);
   b->next->prev = b->prev;
   b->prev->next = b->next;
   b->next = bcache.head.next;

@@ -131,8 +132,8 @@ brelse(struct buf *b)
   bcache.head.next->prev = b;
   bcache.head.next = b;
 
-  b->flags &= ~B_BUSY;
-  wakeup(b);
+  release_sleeplock(&b->sleeplock);
+  release(&b->lock);
   release(&bcache.lock);
 }
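From a caller's perspective the buffer protocol is unchanged: bread returns a buffer the caller owns until brelse. A sketch of typical use under the new scheme (kernel context assumed; dev and sector are placeholders):

    struct buf *bp;

    bp = bread(dev, sector);   // returns with bp->sleeplock held, data valid
    // ... examine or modify bp->data[0..511] ...
    bwrite(bp);                // sets B_DIRTY and writes the block via iderw
    brelse(bp);                // releases the sleeplock, moves bp to the LRU head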

buf.h (7 changed lines)

@@ -2,12 +2,13 @@ struct buf {
   int flags;
   uint dev;
   uint sector;
+  struct spinlock lock;
+  struct sleeplock sleeplock;
   struct buf *prev; // LRU cache list
   struct buf *next;
   struct buf *qnext; // disk queue
   uchar data[512];
 };
-#define B_BUSY  0x1  // buffer is locked by some process
-#define B_VALID 0x2  // buffer has been read from disk
-#define B_DIRTY 0x4  // buffer needs to be written to disk
+#define B_VALID 0x1  // buffer has been read from disk
+#define B_DIRTY 0x2  // buffer needs to be written to disk

defs.h (5 changed lines)

@@ -5,6 +5,7 @@ struct inode;
 struct pipe;
 struct proc;
 struct spinlock;
+struct sleeplock;
 struct stat;
 struct superblock;

@@ -129,6 +130,10 @@ void initlock(struct spinlock*, char*);
 void            release(struct spinlock*);
 void            pushcli(void);
 void            popcli(void);
+void            initsleeplock(struct sleeplock*);
+void            acquire_sleeplock(struct sleeplock*, struct spinlock*);
+void            release_sleeplock(struct sleeplock*);
+int             acquired_sleeplock(struct sleeplock*);
 
 // string.c
 int             memcmp(const void*, const void*, uint);

file.c (6 changed lines)

@@ -2,8 +2,8 @@
 #include "defs.h"
 #include "param.h"
 #include "fs.h"
-#include "file.h"
 #include "spinlock.h"
+#include "file.h"
 
 struct devsw devsw[NDEV];
 struct {

@@ -133,7 +133,8 @@ filewrite(struct file *f, char *addr, int n)
       begin_trans();
       ilock(f->ip);
-      r = writei(f->ip, addr + i, f->off, n1);
+      if ((r = writei(f->ip, addr + i, f->off, n1)) > 0)
+        f->off += r;
       iunlock(f->ip);
       commit_trans();

@@ -141,7 +142,6 @@ filewrite(struct file *f, char *addr, int n)
         break;
       if(r != n1)
         panic("short filewrite");
-      f->off += r;
       i += r;
     }
     return i == n ? n : -1;
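The race fixed here: f->off was read under ilock but updated only after iunlock, so two processes writing through a shared struct file could both read the same offset and overwrite each other's data. A sketch of the bad interleaving (offsets hypothetical, for illustration):

    // CPU0: ilock(ip); writei(ip, a0, f->off /* 100 */, n); iunlock(ip);
    // CPU1: ilock(ip); writei(ip, a1, f->off /* still 100 */, n); iunlock(ip);
    // CPU0: f->off += r;   // both writes landed at offset 100
    // CPU1: f->off += r;

    // The fix performs the offset update while f->ip is still locked,
    // making the read-modify-write of f->off atomic with the write:
    if((r = writei(f->ip, addr + i, f->off, n1)) > 0)
      f->off += r;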

file.h (6 changed lines)

@@ -15,7 +15,9 @@ struct inode {
   uint dev;           // Device number
   uint inum;          // Inode number
   int ref;            // Reference count
-  int flags;          // I_BUSY, I_VALID
+  int flags;          // I_VALID
+  struct spinlock lock;
+  struct sleeplock sleeplock;
   short type;         // copy of disk inode
   short major;

@@ -25,10 +27,8 @@ struct inode {
   uint addrs[NDIRECT+1];
 };
 
-#define I_BUSY 0x1
 #define I_VALID 0x2
 
 // device implementations
 
 struct devsw {

fs.c (32 changed lines)

@@ -113,11 +113,8 @@ bfree(int dev, uint b)
 // It is an error to use an inode without holding a reference to it.
 //
 // Processes are only allowed to read and write inode
-// metadata and contents when holding the inode's lock,
-// represented by the I_BUSY flag in the in-memory copy.
-// Because inode locks are held during disk accesses,
-// they are implemented using a flag rather than with
-// spin locks. Callers are responsible for locking
+// metadata and contents when holding the inode's sleeplock.
+// Callers are responsible for locking
 // inodes before passing them to routines in this file; leaving
 // this responsibility with the caller makes it possible for them
 // to create arbitrarily-sized atomic operations.

@@ -216,6 +213,7 @@ iget(uint dev, uint inum)
   ip->inum = inum;
   ip->ref = 1;
   ip->flags = 0;
+  initsleeplock(&ip->sleeplock);
   release(&icache.lock);
 
   return ip;

@@ -232,7 +230,7 @@ idup(struct inode *ip)
   return ip;
 }
 
-// Lock the given inode.
+// Acquire the sleeplock for a given inode.
 void
 ilock(struct inode *ip)
 {

@@ -243,9 +241,7 @@ ilock(struct inode *ip)
     panic("ilock");
 
   acquire(&icache.lock);
-  while(ip->flags & I_BUSY)
-    sleep(ip, &icache.lock);
-  ip->flags |= I_BUSY;
+  acquire_sleeplock(&ip->sleeplock, &icache.lock);
   release(&icache.lock);
 
   if(!(ip->flags & I_VALID)){

@@ -268,12 +264,11 @@ ilock(struct inode *ip)
 void
 iunlock(struct inode *ip)
 {
-  if(ip == 0 || !(ip->flags & I_BUSY) || ip->ref < 1)
+  if(ip == 0 || !acquired_sleeplock(&ip->sleeplock) || ip->ref < 1)
     panic("iunlock");
 
   acquire(&icache.lock);
-  ip->flags &= ~I_BUSY;
-  wakeup(ip);
+  release_sleeplock(&ip->sleeplock);
   release(&icache.lock);
 }

@@ -284,14 +279,15 @@ iput(struct inode *ip)
   acquire(&icache.lock);
   if(ip->ref == 1 && (ip->flags & I_VALID) && ip->nlink == 0){
     // inode is no longer used: truncate and free inode.
-    if(ip->flags & I_BUSY)
+    if(acquired_sleeplock(&ip->sleeplock))
       panic("iput busy");
-    ip->flags |= I_BUSY;
+    acquire_sleeplock(&ip->sleeplock, &icache.lock);
     release(&icache.lock);
     itrunc(ip);
     ip->type = 0;
     iupdate(ip);
     acquire(&icache.lock);
+    release_sleeplock(&ip->sleeplock);
     ip->flags = 0;
     wakeup(ip);
   }

@@ -433,10 +429,14 @@ writei(struct inode *ip, char *src, uint off, uint n)
     return devsw[ip->major].write(ip, src, n);
   }
 
-  if(off > ip->size || off + n < off)
+  if(off > ip->size || off + n < off) {
+    panic("writei1");
     return -1;
-  if(off + n > MAXFILE*BSIZE)
+  }
+  if(off + n > MAXFILE*BSIZE) {
+    panic("writei2");
     return -1;
+  }
 
   for(tot=0; tot<n; tot+=m, off+=m, src+=m){
     bp = bread(ip->dev, bmap(ip, off/BSIZE));
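As the comment at the top of this file says, locking is left to callers so that multi-step operations stay atomic. The usual shape, mirroring filewrite above (a sketch, kernel context assumed):

    begin_trans();
    ilock(ip);      // acquire_sleeplock on ip->sleeplock; loads inode if needed
    // ... read and write inode metadata and contents, e.g. writei(...) ...
    iunlock(ip);    // release_sleeplock
    commit_trans();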

ide.c (2 changed lines)

@@ -127,7 +127,7 @@ iderw(struct buf *b)
 {
   struct buf **pp;
 
-  if(!(b->flags & B_BUSY))
+  if(!acquired_sleeplock(&b->sleeplock))
     panic("iderw: buf not busy");
   if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
     panic("iderw: nothing to do");

log.c (14 changed lines)

@@ -43,9 +43,9 @@ struct logheader {
 struct {
   struct spinlock lock;
+  struct sleeplock sleeplock;
   int start;
   int size;
-  int intrans;
   int dev;
   struct logheader lh;
 } log;

@@ -60,6 +60,7 @@ initlog(void)
   struct superblock sb;
 
   initlock(&log.lock, "log");
+  initsleeplock(&log.sleeplock);
   readsb(ROOTDEV, &sb);
   log.start = sb.size - sb.nlog;
   log.size = sb.nlog;

@@ -133,10 +134,7 @@ void
 begin_trans(void)
 {
   acquire(&log.lock);
-  while (log.intrans) {
-    sleep(&log, &log.lock);
-  }
-  log.intrans = 1;
+  acquire_sleeplock(&log.sleeplock, &log.lock);
   release(&log.lock);
 }

@@ -149,10 +147,8 @@ commit_trans(void)
     log.lh.n = 0;
     write_head();    // Reclaim log
   }
 
   acquire(&log.lock);
-  log.intrans = 0;
-  wakeup(&log);
+  release_sleeplock(&log.sleeplock);
   release(&log.lock);
 }

@@ -171,7 +167,7 @@ log_write(struct buf *b)
   if (log.lh.n >= LOGSIZE || log.lh.n >= log.size - 1)
     panic("too big a transaction");
-  if (!log.intrans)
+  if (!acquired_sleeplock(&log.sleeplock))
     panic("write outside of trans");
 
   // cprintf("log_write: %d %d\n", b->sector, log.lh.n);
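begin_trans and commit_trans now serialize transactions through log.sleeplock instead of the intrans flag; the caller's view is unchanged. A sketch of a transaction (names from this commit; dev and sector are placeholders):

    begin_trans();            // sleeps until no other transaction is active
    bp = bread(dev, sector);
    // ... modify bp->data ...
    log_write(bp);            // record the block in the transaction
    brelse(bp);
    commit_trans();           // flush the log to disk and reclaim it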

pipe.c (2 changed lines)

@@ -4,8 +4,8 @@
 #include "mmu.h"
 #include "proc.h"
 #include "fs.h"
-#include "file.h"
 #include "spinlock.h"
+#include "file.h"
 
 #define PIPESIZE 512

spinlock.c

@@ -17,10 +17,9 @@ initlock(struct spinlock *lk, char *name)
   lk->cpu = 0;
 }
 
-// Acquire the lock.
-// Loops (spins) until the lock is acquired.
-// Holding a lock for a long time may cause
-// other CPUs to waste time spinning to acquire it.
+// Acquire a spin lock. Loops (spins) until the lock is acquired.
+// Holding a lock for a long time may cause other CPUs to waste time spinning to acquire it.
+// Spinlocks shouldn't be held across sleep(); for those cases, use sleeplocks.
 void
 acquire(struct spinlock *lk)
 {

@@ -115,3 +114,38 @@ popcli(void)
     sti();
 }
 
+void
+initsleeplock(struct sleeplock *l)
+{
+  l->locked = 0;
+}
+
+// Grab the sleeplock that is protected by spinl. Sleeplocks allow a process to
+// lock a data structure for a long time, including across sleeps. Other processes
+// that try to acquire a sleeplock are put to sleep while another process holds it.
+// To update the status of the sleeplock atomically, the caller must hold spinl.
+void
+acquire_sleeplock(struct sleeplock *sleepl, struct spinlock *spinl)
+{
+  while (sleepl->locked) {
+    sleep(sleepl, spinl);
+  }
+  sleepl->locked = 1;
+}
+
+// Release the sleeplock that is protected by a spin lock.
+// Caller must hold the spinlock that protects the sleeplock.
+void
+release_sleeplock(struct sleeplock *sleepl)
+{
+  sleepl->locked = 0;
+  wakeup(sleepl);
+}
+
+// Is the sleeplock acquired?
+// Caller must hold the spinlock that protects the sleeplock.
+int
+acquired_sleeplock(struct sleeplock *sleepl)
+{
+  return sleepl->locked;
+}
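The sleep/wakeup pair in these functions plays the role of a condition variable. For readers outside the kernel, a hypothetical userspace analogue of the same pattern using POSIX threads (not part of this commit; a pthread mutex stands in for the protecting spinlock):

    #include <pthread.h>

    struct usleeplock {
      int locked;
      pthread_cond_t cond;    // stands in for the sleep()/wakeup() channel
    };

    void
    uinitsleeplock(struct usleeplock *l)
    {
      l->locked = 0;
      pthread_cond_init(&l->cond, 0);
    }

    // Caller must hold m, the mutex protecting the lock state (like spinl).
    void
    uacquire_sleeplock(struct usleeplock *l, pthread_mutex_t *m)
    {
      while (l->locked)
        pthread_cond_wait(&l->cond, m);   // atomically releases m and sleeps
      l->locked = 1;
    }

    // Caller must hold m here too, mirroring release_sleeplock above.
    void
    urelease_sleeplock(struct usleeplock *l)
    {
      l->locked = 0;
      pthread_cond_broadcast(&l->cond);   // the wakeup(sleepl) equivalent
    }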

spinlock.h

@@ -1,4 +1,4 @@
-// Mutual exclusion lock.
+// Mutual exclusion lock for short code fragments.
 struct spinlock {
   uint locked;       // Is the lock held?

@@ -9,3 +9,8 @@ struct spinlock {
   // that locked the lock.
 };
 
+// Lock that may be held across sleeps.
+struct sleeplock {
+  uint locked;       // Is the lock held?
+};

sysfile.c

@@ -5,6 +5,7 @@
 #include "mmu.h"
 #include "proc.h"
 #include "fs.h"
+#include "spinlock.h"
 #include "file.h"
 #include "fcntl.h"