Add getCachedPage() call. This allows dirtyPageTable to write back pages without accidentally reading them back in from disk with loadPage(). This should improve performance and allow loadPageOfType() to be used safely.
This commit is contained in:
parent
4493dbb88b
commit
b44f8b17b3
6 changed files with 45 additions and 9 deletions
|
@ -164,6 +164,7 @@ compensated_function void __profile_releasePage(Page * p) {
|
|||
|
||||
// Buffer-manager dispatch table.  Each stasis_buffer_manager_*_open()
// routine points these at its concrete implementation; until then they
// are null and must not be called.
Page * (*loadPageImpl)(int xid, pageid_t pageid, pagetype_t type) = 0;
Page * (*loadUninitPageImpl)(int xid, pageid_t pageid) = 0;
Page * (*getCachedPageImpl)(int xid, pageid_t pageid) = 0;
void (*releasePageImpl)(Page * p) = 0;
void (*writeBackPage)(Page * p) = 0;
void (*forcePages)() = 0;
|
||||
|
@ -175,7 +176,6 @@ Page * loadPage(int xid, pageid_t pageid) {
|
|||
// This lock is released at Tcommit()
|
||||
if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); }
|
||||
return loadPageImpl(xid, pageid, UNKNOWN_TYPE_PAGE);
|
||||
|
||||
}
|
||||
Page * loadPageOfType(int xid, pageid_t pageid, pagetype_t type) {
|
||||
if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); }
|
||||
|
@ -186,9 +186,10 @@ Page * loadUninitializedPage(int xid, pageid_t pageid) {
|
|||
if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); }
|
||||
|
||||
return loadUninitPageImpl(xid, pageid);
|
||||
|
||||
}
|
||||
|
||||
/**
 * Return the page iff it is already resident in the buffer pool.
 * Delegates to the active buffer manager; per the header contract this
 * never blocks on I/O, and yields NULL on a cache miss.
 */
Page * getCachedPage(int xid, pageid_t pageid) {
  Page * cached = getCachedPageImpl(xid, pageid);
  return cached;
}
|
||||
/** Unpin a page obtained from loadPage()/getCachedPage(); forwards to
    the active buffer manager's release hook. */
void releasePage(Page * p) {
  releasePageImpl(p);
}
|
||||
|
|
|
@ -190,6 +190,27 @@ static void * writeBackWorker(void * ignored) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static Page * bhGetCachedPage(int xid, const pageid_t pageid) {
|
||||
pthread_mutex_lock(&mut);
|
||||
// Is the page in cache?
|
||||
Page * ret = LH_ENTRY(find)(cachedPages, &pageid, sizeof(pageid));
|
||||
if(ret) {
|
||||
checkPageState(ret);
|
||||
if(!*pagePendingPtr(ret)) {
|
||||
// good
|
||||
if(!*pagePinCountPtr(ret) ) {
|
||||
// Then ret is in lru (otherwise it would be pending, or not cached); remove it.
|
||||
lru->remove(lru, ret);
|
||||
}
|
||||
(*pagePinCountPtr(ret))++;
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&mut);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static Page * bhLoadPageImpl_helper(int xid, const pageid_t pageid, int uninitialized, pagetype_t type) {
|
||||
|
||||
// Note: Calls to loadlatch in this function violate lock order, but
|
||||
|
@ -397,6 +418,7 @@ void stasis_buffer_manager_hash_open(stasis_page_handle_t * h) {
|
|||
|
||||
loadPageImpl = bhLoadPageImpl;
|
||||
loadUninitPageImpl = bhLoadUninitPageImpl;
|
||||
getCachedPageImpl = bhGetCachedPage;
|
||||
releasePageImpl = bhReleasePage;
|
||||
writeBackPage = bhWriteBackPage;
|
||||
forcePages = bhForcePages;
|
||||
|
|
|
@ -45,6 +45,7 @@ int stasis_buffer_manager_deprecated_open(stasis_page_handle_t * ph) {
|
|||
releasePageImpl = bufManReleasePage;
|
||||
loadPageImpl = bufManLoadPage;
|
||||
loadUninitPageImpl = bufManLoadUninitPage;
|
||||
getCachedPageImpl = bufManLoadPage; // Since this code is deprecated, loadPage is "good enough" though it breaks segments.
|
||||
writeBackPage = pageWrite_legacyWrapper;
|
||||
forcePages = forcePageFile_legacyWrapper;
|
||||
forcePageRange = forceRangePageFile_legacyWrapper;
|
||||
|
|
|
@ -62,6 +62,7 @@ void stasis_buffer_manager_mem_array_open () {
|
|||
|
||||
releasePageImpl = paReleasePage;
|
||||
loadPageImpl = paLoadPage;
|
||||
getCachedPageImpl = paLoadPage;
|
||||
writeBackPage = paWriteBackPage;
|
||||
forcePages = paForcePages;
|
||||
stasis_buffer_manager_close = paBufDeinit;
|
||||
|
|
|
@ -91,10 +91,12 @@ void stasis_dirty_page_table_flush(stasis_dirty_page_table_t * dirtyPages) {
|
|||
pthread_mutex_unlock(&dirtyPages->mutex);
|
||||
|
||||
for(i = 0; i < MAX_BUFFER_SIZE && staleDirtyPages[i] != -1; i++) {
|
||||
p = loadPage(-1, staleDirtyPages[i]);
|
||||
p = getCachedPage(-1, staleDirtyPages[i]);
|
||||
if(p) {
|
||||
writeBackPage(p);
|
||||
releasePage(p);
|
||||
}
|
||||
}
|
||||
free(staleDirtyPages);
|
||||
}
|
||||
void stasis_dirty_page_table_flush_range(stasis_dirty_page_table_t * dirtyPages, pageid_t start, pageid_t stop) {
|
||||
|
@ -117,10 +119,12 @@ void stasis_dirty_page_table_flush_range(stasis_dirty_page_table_t * dirtyPages,
|
|||
pthread_mutex_unlock(&dirtyPages->mutex);
|
||||
|
||||
for(i = 0; i < MAX_BUFFER_SIZE && staleDirtyPages[i] != -1; i++) {
|
||||
p = loadPage(-1, staleDirtyPages[i]);
|
||||
p = getCachedPage(-1, staleDirtyPages[i]);
|
||||
if(p) {
|
||||
writeBackPage(p);
|
||||
releasePage(p);
|
||||
}
|
||||
}
|
||||
free(staleDirtyPages);
|
||||
forcePageRange(start*PAGE_SIZE,stop*PAGE_SIZE);
|
||||
|
||||
|
|
|
@ -97,6 +97,7 @@ Page * loadPageOfType(int xid, pageid_t pageid, pagetype_t type);
|
|||
|
||||
Page * loadUninitializedPage(int xid, pageid_t pageid);
|
||||
|
||||
Page * getCachedPage(int xid, const pageid_t pageid);
|
||||
|
||||
/**
|
||||
This is the function pointer that stasis_buffer_manager_open sets in order to
|
||||
|
@ -104,6 +105,12 @@ Page * loadUninitializedPage(int xid, pageid_t pageid);
|
|||
*/
|
||||
extern Page * (*loadPageImpl)(int xid, pageid_t pageid, pagetype_t type);
|
||||
extern Page * (*loadUninitPageImpl)(int xid, pageid_t pageid);
|
||||
/**
|
||||
Get a page from cache. This function should never block on I/O.
|
||||
|
||||
@return a pointer to the page, or NULL if the page is not in cache, or is being read from disk.
|
||||
*/
|
||||
extern Page * (*getCachedPageImpl)(int xid, pageid_t pageid);
|
||||
/**
|
||||
loadPage acquires a lock when it is called, effectively pinning it
|
||||
in memory. releasePage releases this lock.
|
||||
|
|
Loading…
Reference in a new issue