less concurrent latching

Sears Russell 2009-11-10 02:15:39 +00:00
parent 51911f79f2
commit 07c3ad7979


@@ -126,8 +126,12 @@ static inline stasis_buffer_concurrent_hash_tls_t * populateTLS(stasis_buffer_ma
   while(tls->p == NULL) {
     Page * tmp = ch->lru->getStale(ch->lru);
     hashtable_bucket_handle_t h;
-    tls->p = hashtable_remove_begin(ch->ht, tmp->id, &h); // Deadlocks here.
-    if(tls->p && trywritelock(tls->p->loadlatch,0)) {
+    tls->p = hashtable_remove_begin(ch->ht, tmp->id, &h);
+    if(tls->p) {
+      // TODO: It would be nice to make this a trywritelock to avoid blocking here.
+      // However, this would cause subtle problems; page->id could change while we're in LRU. LRU partitions its latches on page->id, and references state
+      // stored with the page...
+      writelock(tls->p->loadlatch,0);
       // note that the removes are atomic. This is guaranteed by the hashtable lock.
       ch->lru->remove(ch->lru, tls->p);
       // The getStale was not atomic, which is OK (but we can't trust tmp anymore...)
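Note on the TODO in this hunk: the reason a blocking writelock is insisted on is the same reason a trywritelock would be hard to get right. The sketch below is a plain illustration, not Stasis code (page_t, partitioned_lru_t, NUM_PARTITIONS and lru_remove are hypothetical names), assuming an LRU that hashes page->id onto a fixed set of latch-protected partitions, as the comment describes:

#include <pthread.h>
#include <stdint.h>

#define NUM_PARTITIONS 16

typedef struct {
  int64_t id;                  /* pageid; also selects the LRU partition */
  pthread_rwlock_t loadlatch;  /* stand-in for the page's loadlatch      */
} page_t;

typedef struct {
  pthread_mutex_t latch[NUM_PARTITIONS];
  /* per-partition list heads would live here */
} partitioned_lru_t;

/* remove() (and any later hit()) derives the partition from p->id.  If some
   other thread reassigned p->id after the page went into the LRU, the
   partition computed here would no longer be the one whose list actually
   holds p, so we would latch one partition and unlink from another.  Holding
   p->loadlatch in write mode across the eviction (the writelock added above)
   keeps p->id fixed for the duration. */
static void lru_remove(partitioned_lru_t *lru, page_t *p) {
  pthread_mutex_t *part = &lru->latch[p->id % NUM_PARTITIONS];
  pthread_mutex_lock(part);
  /* ... unlink p from this partition's list ... */
  pthread_mutex_unlock(part);
}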
@@ -139,19 +143,24 @@ static inline stasis_buffer_concurrent_hash_tls_t * populateTLS(stasis_buffer_ma
       assert(!tls->p->dirty);
       unlock(tls->p->loadlatch);
       break;
-    } else if(tls->p) {
-      // page is pinned or something. Oh well. Throw this one back (atomically).
-      hashtable_remove_cancel(ch->ht, &h);
-      // hit the page. This is safe because lru latches it. (We might be hitting some arbitrary page here, but don't care)
-      ch->lru->hit(ch->lru, tmp);
-      // Go around the loop again.
-      tls->p = NULL;
+//    } else if(tls->p) {
+//      // page is pinned or something. Oh well. Throw this one back (atomically).
+//      hashtable_remove_cancel(ch->ht, &h);
+//      // hit the page. This is safe because lru latches it. (We might be hitting some arbitrary page here, but don't care)
+//      ch->lru->hit(ch->lru, tmp);
+//      // Go around the loop again.
+//      tls->p = NULL;
     } else {
       // otherwise, page is not in hashtable, but it is in LRU. We can observe this because getStale and hashtable remove are not atomic.
       // remove failed; need to 'complete' it to release latch (otherwise, this is a no-op)
+      // no need to hit the page; we will not spin on it for long (the other thread will be removing it from lru before it blocks on I/O)
+      // hit the page so we don't spin on it; readlock suffices, since we don't want to change the pageid.
+      // readlock(tmp->loadlatch,0);
+      // ch->lru->hit(ch->lru, tmp);
+      // unlock(tmp->loadlatch);
       hashtable_remove_finish(ch->ht, &h);
-      // hit the page so we don't spin on it.
-      ch->lru->hit(ch->lru, tmp);
     }
     count ++;
     if(count == 100) {
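For context on the hashtable calls in this loop: the comments above lean on the two-phase remove protocol. The declarations below are illustrative stand-ins, not the real Stasis prototypes; they only spell out (roughly) the contract the code depends on:

#include <stdint.h>

typedef struct hashtable_t hashtable_t;                   /* opaque, illustrative                         */
typedef struct { void *latched_bucket; } bucket_handle_t; /* stand-in for hashtable_bucket_handle_t       */

/* Latches the key's bucket and returns the stored value, or NULL if the key
   is absent.  Either way the bucket stays latched and *h remembers which one,
   so the removal is atomic with respect to other hashtable callers. */
void * ht_remove_begin(hashtable_t *ht, int64_t key, bucket_handle_t *h);

/* Completes the removal (a no-op when the key was absent) and releases the
   bucket latch; this is why the else-branch above still calls
   hashtable_remove_finish() even though nothing was removed. */
void ht_remove_finish(hashtable_t *ht, bucket_handle_t *h);

/* Puts the value back and releases the bucket latch; the old code used this
   to "throw the page back" when its trywritelock failed. */
void ht_remove_cancel(hashtable_t *ht, bucket_handle_t *h);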
@@ -173,10 +182,11 @@ static Page * chLoadPageImpl_helper(stasis_buffer_manager_t* bm, int xid, const
     p = tls->p;
     tls->p = NULL;
-    ch->lru->insert(ch->lru, p);
     int succ = trywritelock(p->loadlatch, 0);
     assert(succ);
+    ch->lru->insert(ch->lru, p);
     hashtable_unlock(&h);
     p->id = pageid;
@@ -191,8 +201,8 @@ static Page * chLoadPageImpl_helper(stasis_buffer_manager_t* bm, int xid, const
     tls = populateTLS(bm);
     if(needFlush(bm)) { pthread_cond_signal(&ch->needFree); }
   }
-  if(first) { ch->lru->hit(ch->lru, p); }
   readlock(p->loadlatch, 0);
+  if(first) { ch->lru->hit(ch->lru, p); }
   hashtable_unlock(&h);
   assert(p->id == pageid);
   return p;
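The hit moved in the last hunk now runs while p->loadlatch is held in read mode, which (per the comment in the first hunk) keeps page->id, and hence the LRU partition, stable for the duration; the commented-out note in the second hunk ("readlock suffices, since we don't want to change the pageid") gives the same rationale. A minimal sketch of that pattern, reusing the hypothetical page_t/partitioned_lru_t types from the sketch after the first hunk (lru_hit is likewise hypothetical):

/* Illustrative only; extends the earlier partitioned-LRU sketch. */
static void lru_hit(partitioned_lru_t *lru, page_t *p);  /* picks its partition from p->id */

static void hit_under_readlock(partitioned_lru_t *lru, page_t *p) {
  pthread_rwlock_rdlock(&p->loadlatch);  /* shared mode: p->id cannot change, other readers still allowed */
  lru_hit(lru, p);                       /* partition chosen from a stable p->id */
  pthread_rwlock_unlock(&p->loadlatch);
}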