Did away with slottedPreRallocFromPage.

Sears Russell 2006-06-13 20:00:46 +00:00
parent 164f84a533
commit a11fbdd9e6
5 changed files with 17 additions and 63 deletions

View file

@@ -44,6 +44,11 @@ terms specified in this license.
*
* Implementation of log truncation for lladd.
*
* @todo TRUNCATE_INTERVAL should be dynamically set...
* @todo log truncation policy should be set in terms of the percentage of the log that can be truncated
* (instead of by absolute logfile size)
* @todo avoid copying the non-truncated tail of the log each time truncation occurs.
*
* $Id$
*
*/

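The percentage-based policy suggested by the @todo block above could look roughly like the sketch below. LogFlushedLSN(), LogTruncationPoint() and truncateNow() appear elsewhere in this commit; the 50% threshold and the LogTotalSize() helper are assumptions made for illustration.

    static void maybeTruncateByFraction(void) {
      /* hypothetical helper: total bytes currently held in the logfile */
      lsn_t total = LogTotalSize();
      /* bytes that are durable but still sit ahead of the truncation point */
      lsn_t reclaimable = LogFlushedLSN() - LogTruncationPoint();
      /* truncate once more than half of the log could be thrown away,
         instead of comparing against an absolute TARGET_LOG_SIZE */
      if(total > 0 && reclaimable * 2 > total) {
        truncateNow();
      }
    }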
View file

@@ -77,13 +77,6 @@
*/
//}end
static int operate(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
/** @todo Currently, Talloc() needs to clean up the page type (for recovery). Should this be elsewhere? */
/* if(*page_type_ptr(p) == UNINITIALIZED_PAGE) {
*page_type_ptr(p) = SLOTTED_PAGE;
}
assert(*page_type_ptr(p) == SLOTTED_PAGE); */
if(rid.size >= BLOB_THRESHOLD_SIZE && rid.size != BLOB_SLOT) {
allocBlob(xid, p, lsn, rid);
@@ -203,7 +196,6 @@ compensated_function recordid Talloc(int xid, long size) {
compensated_function recordid TallocFromPage(int xid, long page, long size) {
recordid rid;
Page * p = NULL;
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
try_ret(NULLRID) {
rid = preAllocBlobFromPage(xid, page, size);
@@ -211,29 +203,22 @@ compensated_function recordid TallocFromPage(int xid, long page, long size) {
} end_ret(NULLRID);
} else {
begin_action_ret(pthread_mutex_unlock, &talloc_mutex, NULLRID) {
Page * p = loadPage(xid, page);
pthread_mutex_lock(&talloc_mutex);
rid = slottedPreRallocFromPage(xid, page, size, &p);
if(rid.size == size) {
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
} else {
p = loadPage(xid, page);
if(slottedFreespace(p) < size) {
slottedCompact(p);
releasePage(p);
p = NULL;
rid = slottedPreRallocFromPage(xid, page, size, &p);
if(rid.size == size) {
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
} else {
assert(rid.size < 0);
}
}
if(p) {
/* @todo alloc.c pins multiple pages -> Will deadlock with small buffer sizes.. */
releasePage(p);
if(slottedFreespace(p) < size) {
rid = NULLRID;
} else {
rid = slottedRawRalloc(p, size);
assert(rid.size == size);
Tupdate(xid, rid, NULL, OPERATION_ALLOC);
}
releasePage(p);
} compensate_ret(NULLRID);
}
return rid;
}

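Read together, the hunk above leaves TallocFromPage with a single in-page allocation path. A condensed sketch of roughly that shape, using only the calls visible in the diff (the talloc_mutex latch, the begin_action_ret/compensate_ret compensation macros, and the blob branch are elided here):

    recordid allocFromPageSketch(int xid, long page, long size) {
      recordid rid;
      Page * p = loadPage(xid, page);
      if(slottedFreespace(p) < size) {
        slottedCompact(p);                        /* defragment, then re-check the space */
      }
      if(slottedFreespace(p) < size) {
        rid = NULLRID;                            /* page really is full; caller sees NULLRID */
      } else {
        rid = slottedRawRalloc(p, size);          /* reserve the slot directly on the page */
        assert(rid.size == size);
        Tupdate(xid, rid, NULL, OPERATION_ALLOC); /* log the allocation */
      }
      releasePage(p);
      return rid;
    }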
View file

@@ -196,16 +196,12 @@ size_t slottedFreespace(Page * page) {
@todo need to obtain (transaction-level) write locks _before_ writing log entries. Otherwise, we can deadlock at recovery.
*/
compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
recordid ret;
int isBlob = 0;
if(size == BLOB_SLOT) {
isBlob = 1;
size = sizeof(blob_record_t);
}
assert(size < BLOB_THRESHOLD_SIZE);
@@ -237,8 +233,9 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
} end_ret(NULLRID);
slottedPageInitialize(*pp);
}
assert(*page_type_ptr(*pp) == SLOTTED_PAGE);
ret = slottedRawRalloc(*pp, size);
assert(ret.size == size);
if(isBlob) {
*slot_length_ptr(*pp, ret.slot) = BLOB_SLOT;
@@ -249,33 +246,6 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
return ret;
}
compensated_function recordid slottedPreRallocFromPage(int xid, long page, long size, Page **pp) {
int isBlob = 0;
if(size == BLOB_SLOT) {
isBlob = 1;
size = sizeof(blob_record_t);
}
try_ret(NULLRID) {
*pp = loadPage(xid, page);
} end_ret(NULLRID);
if(slottedFreespace(*pp) < size) {
releasePage(*pp);
*pp = NULL;
return NULLRID;
}
if(*page_type_ptr(*pp) == UNINITIALIZED_PAGE) {
slottedPageInitialize(*pp);
}
assert(*page_type_ptr(*pp) == SLOTTED_PAGE);
recordid ret = slottedRawRalloc(*pp, size);
assert(ret.size == size);
if(isBlob) {
*slot_length_ptr(*pp, ret.slot) = BLOB_SLOT;
}
return ret;
}
recordid slottedRawRalloc(Page * page, int size) {

View file

@@ -92,11 +92,6 @@ void slottedPageInitialize(Page * p);
*
*/
compensated_function recordid slottedPreRalloc(int xid, long size, Page**p);
/**
Identical to slottedPreRalloc, but allows the user to specify which page the
record should be allocated in.
*/
compensated_function recordid slottedPreRallocFromPage(int xid, long page, long size, Page**p);
/**
* The second phase of slot allocation. Called after the log entry

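The header fragment above ends just as it begins describing the second phase of slot allocation. A rough illustration of how the two phases pair up, assuming a second-phase hook of roughly this shape (its name and signature here are illustrative, not taken from this diff):

    /* hypothetical second-phase hook, for illustration only */
    void slottedApplyAllocSketch(int xid, Page * p, lsn_t lsn, recordid rid);

    void twoPhaseAllocSketch(int xid, long size) {
      Page * p = NULL;
      /* Phase 1: find a page with enough free space and reserve a slot;
         this fixes the recordid before anything is logged. */
      recordid rid = slottedPreRalloc(xid, size, &p);
      /* A log entry describing the allocation is written between the phases;
         the value used here stands in for that entry's LSN. */
      lsn_t lsn = 0;
      /* Phase 2: stamp the slot onto the page with the entry's LSN, so the
         same step can be replayed from the log during recovery. */
      slottedApplyAllocSketch(xid, p, lsn, rid);
      releasePage(p);
    }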
View file

@@ -132,7 +132,6 @@ static void* periodicTruncation(void * ignored) {
if(LogFlushedLSN() - LogTruncationPoint() > TARGET_LOG_SIZE) {
truncateNow();
}
// @todo TRUNCATE_INTERVAL should be dynamically set...
struct timeval now;
struct timespec timeout;
int timeret = gettimeofday(&now, 0);
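The tail of periodicTruncation() above converts gettimeofday()'s timeval into a timespec; the usual reason is a timed wait until the next truncation check. A sketch of that pattern, assuming TRUNCATE_INTERVAL is a number of seconds and using illustrative condition-variable and mutex names (requires <pthread.h> and <sys/time.h>):

    static pthread_mutex_t truncation_mutex_sketch = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  truncation_cond_sketch  = PTHREAD_COND_INITIALIZER;

    static void waitForNextTruncationCheck(void) {
      struct timeval now;
      struct timespec timeout;
      gettimeofday(&now, 0);
      timeout.tv_sec  = now.tv_sec + TRUNCATE_INTERVAL; /* wake one interval from now */
      timeout.tv_nsec = now.tv_usec * 1000;             /* microseconds -> nanoseconds */
      pthread_mutex_lock(&truncation_mutex_sketch);
      /* returns early if the condition is signalled (e.g. at shutdown),
         otherwise times out after TRUNCATE_INTERVAL seconds */
      pthread_cond_timedwait(&truncation_cond_sketch, &truncation_mutex_sketch, &timeout);
      pthread_mutex_unlock(&truncation_mutex_sketch);
    }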