cleaned up buffer manager comments, added some asserts

Sears Russell 2010-07-13 18:17:25 +00:00
parent ee7aaff9ec
commit e2ba421a53
2 changed files with 28 additions and 24 deletions

@@ -174,12 +174,13 @@ static inline stasis_buffer_concurrent_hash_tls_t * populateTLS(stasis_buffer_ma
if(succ) {
// The getStaleAndRemove was not atomic with the hashtable remove, which is OK (but we can't trust tmp anymore...)
assert(tmp == tls->p);
tmp = 0;
if(tls->p->id >= 0) {
ch->page_handle->write(ch->page_handle, tls->p);
}
hashtable_remove_finish(ch->ht, &h); // need to hold bucket lock until page is flushed. Otherwise, another thread could read stale data from the filehandle.
tls->p->id = INVALID_PAGE; // in case loadPage has a pointer to it, and we did this in race with it; when loadPage reacquires loadlatch, it will notice the discrepency
tls->p->id = INVALID_PAGE; // in case loadPage has a pointer to it, and we did this in race with it; when loadPage reacquires loadlatch, it will notice the discrepancy
assert(!tls->p->dirty);
unlock(tls->p->loadlatch);
break;
@@ -193,7 +194,7 @@ static inline stasis_buffer_concurrent_hash_tls_t * populateTLS(stasis_buffer_ma
} else {
// page is not in hashtable, but it is in LRU. getStale and hashtable remove are not atomic.
// However, we cannot observe this; the lru remove happens before the hashtable remove,
// and the hashtable insert happens before the lru insert.
// and the lru insert happens while we hold a latch on the hashtable bucket.
abort();
}
count ++;
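
The two hunks above document the ordering the eviction path relies on: the victim frame comes out of the LRU, the dirty page is written back while the hash-bucket lock is still held, and only then is the hashtable remove finished, so a racing loadPage that misses in the hash cannot re-read a stale version of the page from the file handle. The following is a minimal, self-contained sketch of that ordering; the types and helpers (victim_t, bucket_mutex, write_back) are illustrative stand-ins, not the Stasis API, and the loadlatch handling is omitted.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define INVALID_PAGE (-1L)

typedef struct { long id; int dirty; } victim_t;

static pthread_mutex_t bucket_mutex = PTHREAD_MUTEX_INITIALIZER;

static void write_back(victim_t *p) {   /* stands in for ch->page_handle->write    */
  printf("flushing page %ld\n", p->id);
  p->dirty = 0;
}

static void evict(victim_t *p) {
  pthread_mutex_lock(&bucket_mutex);    /* bucket latched, as in hashtable_remove  */
  if (p->id >= 0) {
    write_back(p);                      /* flush while the bucket is still held,
                                           so nobody can reload stale data         */
  }
  pthread_mutex_unlock(&bucket_mutex);  /* hashtable_remove_finish                 */
  p->id = INVALID_PAGE;                 /* a racing loadPage will notice this      */
  assert(!p->dirty);                    /* the write-back must have cleaned it     */
}

int main(void) {
  victim_t v = { .id = 42, .dirty = 1 };
  evict(&v);
  return 0;
}
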
@@ -211,7 +212,6 @@ static Page * chLoadPageImpl_helper(stasis_buffer_manager_t* bm, int xid, const
stasis_buffer_concurrent_hash_tls_t *tls = populateTLS(bm);
hashtable_bucket_handle_t h;
Page * p = 0;
int first = 1;
do {
if(p) { // the last time around pinned the wrong page!
@@ -220,7 +220,7 @@ static Page * chLoadPageImpl_helper(stasis_buffer_manager_t* bm, int xid, const
// unlock(p->loadlatch);
}
while(NULL == (p = hashtable_test_and_set_lock(ch->ht, pageid, tls->p, &h))) {
first = 0;
// The page was not in the hash. Now it is up to us.
p = tls->p;
tls->p = NULL;
@@ -228,6 +228,10 @@ static Page * chLoadPageImpl_helper(stasis_buffer_manager_t* bm, int xid, const
// Need to acquire lock because some loadPage (in race with us) could have a
// pointer to the page. However, we must be the only thread
// that has the page in its TLS, or something is seriously wrong.
// This cannot deadlock because the load page will look at p->id, and see
// that it is -1. Then it will immediately release loadlatch, allowing
// us to make progress.
writelock(p->loadlatch, 0);
// this has to happen before the page enters LRU; concurrentWrapper (and perhaps future implementations) hash on pageid.
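
The loadPage hunks above are the other half of the protocol: each thread keeps a spare frame in TLS, and hashtable_test_and_set_lock either installs that frame under the requested pageid (returning NULL, so the caller must read the page in under a write latch) or returns the frame already present, whose id must be re-checked under the loadlatch because the eviction path may have set it to INVALID_PAGE in the meantime. Below is a toy, self-contained sketch of that retry loop; the one-slot table, page_t, and load_from_disk are stand-ins, not the real hashtable API, and pin counting is omitted.

#include <pthread.h>
#include <stdio.h>

#define INVALID_PAGE (-1L)

typedef struct {
  long id;
  pthread_rwlock_t loadlatch;
} page_t;

/* toy one-slot stand-in for the bucket-locked hash table */
static pthread_mutex_t bucket = PTHREAD_MUTEX_INITIALIZER;
static page_t *slot = NULL;

/* Install `mine` if the slot is empty (and return NULL); else return the occupant. */
static page_t *test_and_set(long pageid, page_t *mine) {
  (void)pageid;
  pthread_mutex_lock(&bucket);
  page_t *existing = slot;
  if (!existing) slot = mine;
  pthread_mutex_unlock(&bucket);
  return existing;
}

static void load_from_disk(page_t *p, long pageid) { p->id = pageid; }

static page_t *pin(long pageid, page_t *tls_spare) {
  for (;;) {
    page_t *p = test_and_set(pageid, tls_spare);
    if (!p) {                          /* we won: our spare frame now backs pageid */
      p = tls_spare;                   /* (the real code also clears tls->p here)  */
      pthread_rwlock_wrlock(&p->loadlatch);
      load_from_disk(p, pageid);
      pthread_rwlock_unlock(&p->loadlatch);
      return p;
    }
    pthread_rwlock_rdlock(&p->loadlatch);
    int ok = (p->id == pageid);        /* an evictor may have set this to -1       */
    pthread_rwlock_unlock(&p->loadlatch);
    if (ok) return p;                  /* (the real code also bumps a pin count)   */
    /* the frame was recycled under us; go around and look the page up again       */
  }
}

int main(void) {
  page_t spare;
  spare.id = INVALID_PAGE;
  pthread_rwlock_init(&spare.loadlatch, NULL);
  printf("pinned page %ld\n", pin(7, &spare)->id);
  return 0;
}
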

@@ -145,7 +145,7 @@ static inline void rwlc_assertunlocked(rwlc * lock) {
}
static inline void rwlc_readunlock(rwlc *lock) { readunlock(lock->rw); }
static inline void rwlc_cond_wait(pthread_cond_t * cond, rwlc *lock) {
rwlc_assertlocked(lock);
if(!lock->is_writelocked) { abort(); }
lock->is_writelocked = 0;
writeunlock(lock->rw);
pthread_cond_wait(cond, &lock->mut);
@@ -154,7 +154,7 @@ static inline void rwlc_cond_wait(pthread_cond_t * cond, rwlc *lock) {
lock->is_writelocked = 1;
}
static inline int rwlc_cond_timedwait(pthread_cond_t * cond, rwlc *lock, struct timespec * ts) {
rwlc_assertlocked(lock);
if(!lock->is_writelocked) { abort(); }
lock->is_writelocked = 0;
writeunlock(lock->rw);
int ret = pthread_cond_timedwait(cond, &lock->mut, ts);
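
In the second file, both wait paths now insist that the caller actually holds the rwlc for writing before the latch is temporarily given up around the condition wait; rwlc_assertlocked presumably also passed when only a read lock was held, which is not enough here. The sketch below shows the pattern end to end. The field names mirror the diff, but the writelock/writeunlock helpers and the mutex-before-rwlock ordering are guesses about the surrounding code, not the real latches.h.

#include <pthread.h>
#include <stdlib.h>

typedef struct {
  pthread_mutex_t  mut;             /* protects the condition variable          */
  pthread_rwlock_t rw;              /* the reader/writer latch                  */
  int              is_writelocked;  /* "do we hold rw for writing?"             */
} rwlc;

/* Guessed helpers: take mut first, then rw, so re-taking rw while holding mut
 * inside rwlc_cond_wait cannot deadlock against another writer.                */
static void rwlc_writelock(rwlc *l) {
  pthread_mutex_lock(&l->mut);
  pthread_rwlock_wrlock(&l->rw);
  l->is_writelocked = 1;
}
static void rwlc_writeunlock(rwlc *l) {
  l->is_writelocked = 0;
  pthread_rwlock_unlock(&l->rw);
  pthread_mutex_unlock(&l->mut);
}

static void rwlc_cond_wait(pthread_cond_t *cond, rwlc *l) {
  if (!l->is_writelocked) { abort(); }  /* the check the commit adds             */
  l->is_writelocked = 0;
  pthread_rwlock_unlock(&l->rw);        /* drop the latch so others can progress */
  pthread_cond_wait(cond, &l->mut);     /* atomically releases and re-takes mut  */
  pthread_rwlock_wrlock(&l->rw);        /* re-take the write latch ...           */
  l->is_writelocked = 1;                /* ... before reporting it as held       */
}

/* Tiny usage demo: one thread waits under the rwlc, another signals it.         */
static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;
static rwlc latch = { .mut = PTHREAD_MUTEX_INITIALIZER,
                      .rw  = PTHREAD_RWLOCK_INITIALIZER,
                      .is_writelocked = 0 };
static int flag = 0;

static void *producer(void *arg) {
  (void)arg;
  rwlc_writelock(&latch);
  flag = 1;
  pthread_cond_signal(&ready);
  rwlc_writeunlock(&latch);
  return NULL;
}

int main(void) {
  pthread_t t;
  rwlc_writelock(&latch);
  pthread_create(&t, NULL, producer, NULL);
  while (!flag) { rwlc_cond_wait(&ready, &latch); }
  rwlc_writeunlock(&latch);
  pthread_join(t, NULL);
  return 0;
}
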