Fixed cht test / finished error checking within LLADD, but haven't tested error handling yet.

This commit is contained in:
Sears Russell 2005-02-24 21:12:36 +00:00
parent 29dacbe7ed
commit 890a7385d0
14 changed files with 640 additions and 629 deletions
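
Most hunks below follow one idiom: a function gains the compensated_function qualifier and its fallible calls move inside LLADD's compensation macros (try/end, try_ret/end_ret, begin_action/compensate). As a reading aid, here is a minimal sketch of the resulting shape, assuming the macros behave the way the uses in this diff suggest (end_ret(NULLRID) returns NULLRID when an error was raised inside the block); the include path is an assumption, everything else is taken from the diff.

    #include <lladd/transactional.h>    /* assumed include path */

    /* Sketch: the post-commit shape of a compensated allocation wrapper. */
    compensated_function recordid example_alloc_int(int xid) {
      recordid rid;
      try_ret(NULLRID) {
        rid = Talloc(xid, sizeof(int));   /* compensated call; may raise an error  */
      } end_ret(NULLRID);                 /* on error, returns NULLRID from here   */
      return rid;                         /* reached only when no error was raised */
    }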

View file

@ -28,15 +28,15 @@ Operation getRealloc();
@return the recordid of the new record.
*/
recordid Talloc(int xid, long size);
compensated_function recordid Talloc(int xid, long size);
recordid TallocFromPage(int xid, long page, long size);
compensated_function recordid TallocFromPage(int xid, long page, long size);
/**
Free a record.
@todo Currently, we just leak store space on dealloc.
*/
void Tdealloc(int xid, recordid rid);
compensated_function void Tdealloc(int xid, recordid rid);
/**
Obtain the type of a record, as returned by getRecordType.
@ -51,7 +51,7 @@ void Tdealloc(int xid, recordid rid);
@see getRecordType
*/
int TrecordType(int xid, recordid rid);
compensated_function int TrecordType(int xid, recordid rid);
/**
Obtain the length of the data stored in a record.
@ -63,9 +63,9 @@ int TrecordType(int xid, recordid rid);
@return -1 if the record does not exist, the size of the record otherwise.
*/
int TrecordSize(int xid, recordid rid);
compensated_function int TrecordSize(int xid, recordid rid);
/** Return the number of records stored in page pageid */
int TrecordsInPage(int xid, int pageid);
compensated_function int TrecordsInPage(int xid, int pageid);
#endif
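
These declarations only gain the compensated_function qualifier, so existing call sites compile unchanged, but callers are now expected to check for failure afterward. A hedged usage sketch; treating a negative rid.size plus a set compensation_error() as the failure signal is an assumption based on the TallocFromPage error path in alloc.c, and the include paths are likewise assumed.

    #include <assert.h>
    #include <lladd/transactional.h>

    /* Sketch: allocate a record, verify it, and free it again. */
    int store_counter(int xid, int initial) {
      recordid rid = Talloc(xid, sizeof(int));
      if(rid.size < 0 || compensation_error()) { return -1; }  /* assumed failure signal */
      Tset(xid, rid, &initial);
      int sz = TrecordSize(xid, rid);     /* -1 if the record does not exist */
      assert(sz == sizeof(int));
      Tdealloc(xid, rid);                 /* note the @todo above: store space is leaked */
      return 0;
    }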

View file

@ -103,10 +103,10 @@ lladd_pagedList_iterator * TpagedListIterator(int xid, recordid list);
/** @return 1 if there was another entry to be iterated over. 0 otherwise.
If this function returns 1, the caller must free() the malloced memory
returned via the key and value arguments.*/
int TpagedListNext(int xid, lladd_pagedList_iterator * it, byte ** key, int * keySize, byte ** value, int * valueSize);
recordid TpagedListAlloc(int xid);
void TpagedListDelete(int xid, recordid list);
int TpagedListSpansPages(int xid, recordid list);
compensated_function int TpagedListNext(int xid, lladd_pagedList_iterator * it, byte ** key, int * keySize, byte ** value, int * valueSize);
compensated_function recordid TpagedListAlloc(int xid);
compensated_function void TpagedListDelete(int xid, recordid list);
compensated_function int TpagedListSpansPages(int xid, recordid list);
Operation getPagedListInsert();
Operation getPagedListRemove();
#endif
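
A short usage sketch of the iterator contract spelled out above: every time TpagedListNext() returns 1, the caller owns two malloc()ed buffers and must free() them. The include paths, and freeing the iterator struct itself, are assumptions.

    #include <stdlib.h>
    #include <lladd/transactional.h>

    void dump_list(int xid, recordid list) {
      lladd_pagedList_iterator * it = TpagedListIterator(xid, list);
      byte * key; byte * value;
      int keySize, valueSize;
      while(TpagedListNext(xid, it, &key, &keySize, &value, &valueSize)) {
        /* ... use key[0..keySize) and value[0..valueSize) here ... */
        free(key);    /* required by the contract documented above */
        free(value);
      }
      free(it);       /* assumption: the iterator is heap-allocated by TpagedListIterator() */
    }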

View file

@ -71,6 +71,9 @@ static int _chtEval(DfaSet * dfaSet,
}
if(ht != NULL) {
memcpy(&(__header_ptr(&m)->hashTable), ht, sizeof(clusterHashTable_t));
} else {
//memset(&(__header_ptr(&m)->hashTable), 0, sizeof(clusterHashTable_t));
}
/* printf("%s <- %s\n", __header_ptr(&m)->initiator, dfaSet->networkSetup.localhost); */

View file

@ -32,14 +32,14 @@ static void readRawRecord(int xid, Page * p, recordid rid, void * buf, int size)
recordid blob_rec_rid = rid;
blob_rec_rid.size = size;
readRecord(xid, p, blob_rec_rid, buf);
/* Tread(xid, blob_rec_rid, buf); */
/* T read(xid, blob_rec_rid, buf); */
}
static void writeRawRecord(int xid, Page * p, recordid rid, lsn_t lsn, const void * buf, int size) {
recordid blob_rec_rid = rid;
blob_rec_rid.size = size;
writeRecord(xid, p, lsn, blob_rec_rid, buf);
/* Tset(xid, blob_rec_rid, buf); - We no longer need to write a log
/* T set(xid, blob_rec_rid, buf); - We no longer need to write a log
record out here, since we're called by something that is the
result of a log record.*/
}
@ -195,38 +195,48 @@ void closeBlobStore() {
*/
recordid preAllocBlob(int xid, long blobSize) {
compensated_function recordid preAllocBlob(int xid, long blobSize) {
/* Allocate space for the blob entry. */
recordid rid;
try_ret(NULLRID) {
DEBUG("Allocing blob (size %ld)\n", blobSize);
assert(blobSize > 0); /* Don't support zero length blobs right now... */
/* First in buffer manager. */
recordid rid = Talloc(xid, BLOB_SLOT); //sizeof(blob_record_t));
rid = Talloc(xid, BLOB_SLOT); //sizeof(blob_record_t));
rid.size = blobSize;
} end_ret(NULLRID);
return rid;
}
recordid preAllocBlobFromPage(int xid, long page, long blobSize) {
compensated_function recordid preAllocBlobFromPage(int xid, long page, long blobSize) {
recordid rid;
/* Allocate space for the blob entry. */
try_ret(NULLRID) {
DEBUG("Allocing blob (size %ld)\n", blobSize);
assert(blobSize > 0); /* Don't support zero length blobs right now... */
/* First in buffer manager. */
recordid rid = TallocFromPage(xid, page, BLOB_SLOT); //sizeof(blob_record_t));
rid = TallocFromPage(xid, page, BLOB_SLOT); //sizeof(blob_record_t));
rid.size = blobSize;
} end_ret(NULLRID);
return rid;
}
@ -280,7 +290,7 @@ void allocBlob(int xid, Page * p, lsn_t lsn, recordid rid) {
funlockfile(blobf0);
funlockfile(blobf1);
/* Tset() needs to know to 'do the right thing' here, since we've
/* T set() needs to know to 'do the right thing' here, since we've
changed the size it has recorded for this record, and
writeRawRecord makes sure that that is the case.
@ -337,7 +347,7 @@ static FILE * getDirtyFD(int xid, Page * p, lsn_t lsn, recordid rid) {
/* First, determine if the blob is dirty. */
/* Tread() raw record */
/* T read() raw record */
readRawRecord(xid, p, rid, &rec, sizeof(blob_record_t));
assert(rec.size == rid.size);
@ -352,7 +362,7 @@ static FILE * getDirtyFD(int xid, Page * p, lsn_t lsn, recordid rid) {
/* Flip the fd bit on the record. */
rec.fd = rec.fd ? 0 : 1;
/* Tset() raw record */
/* T set() raw record */
writeRawRecord(xid, p, rid, lsn, &rec, sizeof(blob_record_t));
}
@ -459,7 +469,7 @@ void abortBlobs(int xid) {
and undo have to reason about lazy propogation of values to the
bufferManager, and also have to preserve *write* ordering, even
though the writes may be across many transactions, and could be
propogated in the wrong order. If we generate a Tset() (for the
propogated in the wrong order. If we generate a T set() (for the
blob record in bufferManager) for each write, things become much
easier.
*/
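
One detail worth noting in the two preAllocBlob hunks above: rid is now declared before the try_ret block and assigned inside it. The old form declared a second rid inside the block, so the allocated value never reached the return statement. A sketch of the pitfall and of the corrected shape, using hypothetical function names:

    /* BUG sketch: the inner declaration shadows the outer rid, and the
       function returns an uninitialized recordid on success. */
    compensated_function recordid broken_prealloc(int xid, long size) {
      recordid rid;
      try_ret(NULLRID) {
        recordid rid = Talloc(xid, BLOB_SLOT);   /* shadows the outer rid */
        rid.size = size;
      } end_ret(NULLRID);
      return rid;                                /* outer rid was never set */
    }

    /* Corrected shape, as in preAllocBlob()/preAllocBlobFromPage() above. */
    compensated_function recordid fixed_prealloc(int xid, long size) {
      recordid rid;
      try_ret(NULLRID) {
        rid = Talloc(xid, BLOB_SLOT);
        rid.size = size;
      } end_ret(NULLRID);
      return rid;
    }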

View file

@ -77,8 +77,8 @@ typedef struct {
} blob_record_t;
recordid preAllocBlob(int xid, long blobsize);
recordid preAllocBlobFromPage(int xid, long page, long blobsize);
compensated_function recordid preAllocBlob(int xid, long blobsize);
compensated_function recordid preAllocBlobFromPage(int xid, long page, long blobsize);
/**
Allocate a blob of size blobSize.

View file

@ -65,7 +65,10 @@ void redoUpdate(const LogEntry * e) {
if(e->type == UPDATELOG) {
/* lsn_t pageLSN = readLSN(e->contents.update.rid.page); */
recordid rid = e->contents.update.rid;
Page * p = loadPage(e->xid, rid.page);
Page * p;
try {
p = loadPage(e->xid, rid.page);
} end;
lsn_t pageLSN = pageReadLSN(p);
if(e->LSN > pageLSN) {
@ -79,7 +82,10 @@ void redoUpdate(const LogEntry * e) {
} else if(e->type == CLRLOG) {
LogEntry * f = readLSNEntry(e->contents.clr.thisUpdateLSN);
recordid rid = f->contents.update.rid;
Page * p = loadPage(e->xid, rid.page);
Page * p;
try {
p = loadPage(e->xid, rid.page);
} end;
assert(rid.page == e->contents.update.rid.page); /* @todo Should this always hold? */

View file

@ -9,6 +9,7 @@
#include "../page/slotted.h"
#include <assert.h>
//try{
/**
@file
@ -44,9 +45,9 @@
int page = Treserve(int xid, int size)
This would tell Talloc to treat the page as though 'size' bytes had
already been reserved. The 'free space' that Talloc() reasons
already been reserved. The 'free space' that Talloc () reasons
about would be: max(reservedSpace, usedSpace). A seperate call,
TallocFromPage(xid, page, size) already exists, and should ignore
TallocFromPage (xid, page, size) already exists, and should ignore
the presence of the 'reserved space' field.
Track level locality is another problem that Talloc should address,
@ -54,8 +55,8 @@
Better support for locking. Consider this sequence of events:
recordid rid1 = Talloc(xid1, 1);
recordid rid2 = Talloc(xid2, 1); // May deadlock if page level
recordid rid1 = Talloc (xid1, 1);
recordid rid2 = Talloc (xid2, 1); // May deadlock if page level
// locking is used.
The lock manager needs a 'try lock' operation that allows
@ -74,9 +75,9 @@
$Id$
*/
//}end
static int operate(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
/* * @ todo Currently, Talloc() needs to clean up the page type (for recovery). Should this be elsewhere? */
/* * @ todo Currently, T alloc () needs to clean up the page type (for recovery). Should this be elsewhere? */
/* if(*page_type_ptr(p) == UNINITIALIZED_PAGE) {
*page_type_ptr(p) = SLOTTED_PAGE;
@ -149,12 +150,35 @@ Operation getRealloc() {
return o;
}
recordid Talloc(int xid, long size) {
compensated_function recordid Talloc(int xid, long size) {
recordid rid;
Page * p = NULL;
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
/**@todo is it OK that Talloc doesn't pin the page when a blob is alloced?*/
int isBlob = size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT;
if(isBlob) {
try_ret(NULLRID) {
rid = preAllocBlob(xid, size);
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
} end_ret(NULLRID);
} else {
Page * p = NULL;
begin_action_ret(pthread_mutex_unlock, &talloc_mutex, NULLRID) {
pthread_mutex_lock(&talloc_mutex);
rid = slottedPreRalloc(xid, size, &p);
Tupdate(xid, rid, NULL, OPERATION_ALLOC);
/** @todo does releasePage do the correct error checking? */
releasePage(p);
} compensate_ret(NULLRID);
}
return rid;
/* try_ret(NULL_RID) {
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
///@todo is it OK that Talloc doesn't pin the page when a blob is alloced?
rid = preAllocBlob(xid, size);
} else {
pthread_mutex_lock(&talloc_mutex);
rid = slottedPreRalloc(xid, size, &p);
@ -164,72 +188,87 @@ recordid Talloc(int xid, long size) {
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
if(p != NULL) {
/* release the page that preAllocBlob pinned for us. */
/// release the page that preAllocBlob pinned for us.
/* @todo alloc.c pins multiple pages -> Will deadlock with small buffer sizes.. */
/// @todo alloc.c pins multiple pages -> Will deadlock with small buffer sizes..
releasePage(p);
pthread_mutex_unlock(&talloc_mutex);
}
return rid;
} end_ret(NULLRID);
return rid;*/
}
recordid TallocFromPage(int xid, long page, long size) {
compensated_function recordid TallocFromPage(int xid, long page, long size) {
recordid rid;
Page * p = NULL;
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
try_ret(NULLRID) {
rid = preAllocBlobFromPage(xid, page, size);
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
} end_ret(NULLRID);
} else {
begin_action_ret(pthread_mutex_unlock, &talloc_mutex, NULLRID) {
pthread_mutex_lock(&talloc_mutex);
rid = slottedPreRallocFromPage(xid, page, size, &p);
if(p == NULL) {
assert(rid.size == -1);
pthread_mutex_unlock(&talloc_mutex);
return rid;
}
}
if(rid.size == size) {
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
if(p != NULL) {
/* release the page that preRallocFromPage pinned for us. */
} else {
assert(rid.size < 0);
}
if(p) {
/* @todo alloc.c pins multiple pages -> Will deadlock with small buffer sizes.. */
releasePage(p);
pthread_mutex_unlock(&talloc_mutex);
}
} compensate_ret(NULLRID);
}
return rid;
}
void Tdealloc(int xid, recordid rid) {
compensated_function void Tdealloc(int xid, recordid rid) {
void * preimage = malloc(rid.size);
Page * p = loadPage(xid, rid.page);
Page * p;
try {
p = loadPage(xid, rid.page);
} end;
begin_action(releasePage, p) {
readRecord(xid, p, rid, preimage);
/** @todo race in Tdealloc; do we care, or is this something that the log manager should cope with? */
Tupdate(xid, rid, preimage, OPERATION_DEALLOC);
releasePage(p);
} compensate;
free(preimage);
}
int TrecordType(int xid, recordid rid) {
Page * p = loadPage(xid, rid.page);
int ret = getRecordType(xid, p, rid);
compensated_function int TrecordType(int xid, recordid rid) {
Page * p;
try_ret(compensation_error()) {
p = loadPage(xid, rid.page);
} end_ret(compensation_error());
int ret;
ret = getRecordType(xid, p, rid);
releasePage(p);
return ret;
}
int TrecordSize(int xid, recordid rid) {
compensated_function int TrecordSize(int xid, recordid rid) {
int ret;
Page * p = loadPage(xid, rid.page);
Page * p;
try_ret(compensation_error()) {
p = loadPage(xid, rid.page);
} end_ret(compensation_error());
ret = getRecordSize(xid, p, rid);
releasePage(p);
return ret;
}
int TrecordsInPage(int xid, int pageid) {
Page * p = loadPage(xid, pageid);
compensated_function int TrecordsInPage(int xid, int pageid) {
Page * p;
try_ret(compensation_error()) {
p = loadPage(xid, pageid);
} end_ret(compensation_error());
readlock(p->rwlatch, 187);
int ret = *numslots_ptr(p);
unlock(p->rwlatch);
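
Tdealloc() above is the clearest example of the cleanup idiom this commit leans on: begin_action(releasePage, p) registers releasePage(p) to run when the block exits, whether it completes normally or an error cuts it short, and compensate closes the block. A sketch of the same shape with a hypothetical function; readRecord() is the internal helper Tdealloc() itself calls, and the macro semantics are inferred from how this file uses them.

    compensated_function void example_read_record(int xid, recordid rid, void * buf) {
      Page * p;
      try {                            /* guard the page load itself */
        p = loadPage(xid, rid.page);
      } end;
      begin_action(releasePage, p) {   /* p is released on success and on error */
        readRecord(xid, p, rid, buf);
      } compensate;
    }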

View file

@ -120,12 +120,18 @@ Operation getArrayListAlloc() {
}
/*----------------------------------------------------------------------------*/
compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
/** @todo locking for arrayList... this isn't pressing since currently
the only thing that calls arraylist (the hashtable
implementations) serialize bucket list operations anyway... */
static compensated_function int TarrayListExtendInternal(int xid, recordid rid, int slots, int op) {
Page * p;
try_ret(compensation_error()) {
p = loadPage(xid, rid.page);
} end_ret(compensation_error());
TarrayListParameters tlp = pageToTLP(p);
releasePage(p);
p = NULL;
int lastCurrentBlock;
if(tlp.maxOffset == -1) {
@ -145,124 +151,37 @@ compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
tmp2.slot = 0;
tmp2.size = tlp.size;
/* Iterate over the (small number) of indirection blocks that need to be updated */
for(int i = lastCurrentBlock+1; i <= lastNewBlock; i++) {
/* Alloc block i */
int blockSize = tlp.initialSize * powl(tlp.multiplier, i);
int newFirstPage;
begin_action_ret(releasePage, p, compensation_error()) {
newFirstPage = TpageAllocMany(xid, blockSize);
} end_action_ret(compensation_error());
DEBUG("block %d\n", i);
/* Iterate over the storage blocks that are pointed to by our current indirection block. */
// for(int j = 0; j < blockSize; j++) {
// DEBUG("page %d (%d)\n", j, j + newFirstPage);
// tmp2.page = j + newFirstPage;
/** @todo If we were a little smarter about this, and fixed.c
could handle uninitialized blocks correctly, then we
wouldn't have to iterate over the datapages in
TarrayListExtend() (Fixed?)*/
// T update(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
// }
tmp.slot = i + FIRST_DATA_PAGE_OFFSET;
/** @todo what does this do to recovery?? */
/** @todo locking for arrayList... */
/* *page_type_ptr(p) = FIXED_PAGE;
Tset(xid, tmp, &newFirstPage);
*page_type_ptr(p) = ARRAY_LIST_PAGE; */
/* @todo This is a copy of Tupdate!! Replace it.*/
begin_action_ret(releasePage, p, compensation_error()) {
alTupdate(xid, tmp, &newFirstPage, OPERATION_SET);
} end_action_ret(compensation_error());
DEBUG("Tset: {%d, %d, %d} = %d\n", tmp.page, tmp.slot, tmp.size, newFirstPage);
}
tmp.slot = MAX_OFFSET_POSITION;
int newMaxOffset = tlp.maxOffset+slots;
// *page_type_ptr(p) = FIXED_PAGE;
// Tset(xid, tmp, &newMaxOffset);
// *page_type_ptr(p) = ARRAY_LIST_PAGE;
// releasePage(p);
/* @todo This is a copy of Tupdate!! Replace it.*/
begin_action_ret(releasePage, p, compensation_error()) {
alTupdate(xid, tmp, &newMaxOffset, OPERATION_SET);
} compensate_ret(compensation_error());
return 0;
}
/** @todo: TarrayListInstantExtend, is a hacked-up cut and paste version of TarrayListExtend */
compensated_function int TarrayListInstantExtend(int xid, recordid rid, int slots) {
Page * p = loadPage(xid, rid.page);
TarrayListParameters tlp = pageToTLP(p);
int lastCurrentBlock;
if(tlp.maxOffset == -1) {
lastCurrentBlock = -1;
} else{
lastCurrentBlock = getBlockContainingOffset(tlp, tlp.maxOffset, NULL);
}
int lastNewBlock = getBlockContainingOffset(tlp, tlp.maxOffset+slots, NULL);
DEBUG("lastCurrentBlock = %d, lastNewBlock = %d\n", lastCurrentBlock, lastNewBlock);
recordid tmp; /* recordid of slot in base page that holds new block. */
tmp.page = rid.page;
tmp.size = sizeof(int);
recordid tmp2; /* recordid of newly created pages. */
tmp2.slot = 0;
tmp2.size = tlp.size;
/* Iterate over the (small number) of indirection blocks that need to be updated */
try_ret(compensation_error()) {
for(int i = lastCurrentBlock+1; i <= lastNewBlock; i++) {
/* Alloc block i */
int blockSize = tlp.initialSize * powl(tlp.multiplier, i);
int newFirstPage = TpageAllocMany(xid, blockSize);
DEBUG("block %d\n", i);
/* Iterate over the storage blocks that are pointed to by our current indirection block. */
/* for(int j = 0; j < blockSize; j++) {
DEBUG("page %d (%d)\n", j, j + newFirstPage);
tmp2.page = j + newFirstPage;
/ ** @todo If we were a little smarter about this, and fixed.c
could handle uninitialized blocks correctly, then we
wouldn't have to iterate over the datapages in
TarrayListExtend() * /
// Tupdate(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
} */
/* We used to call OPERATION_INITIALIZE_FIXED_PAGE on each page in current indirection block. */
tmp.slot = i + FIRST_DATA_PAGE_OFFSET;
/** @todo what does this do to recovery?? */
/** @todo locking for arrayList... */
*page_type_ptr(p) = FIXED_PAGE;
TinstantSet(xid, tmp, &newFirstPage);
*page_type_ptr(p) = ARRAY_LIST_PAGE;
alTupdate(xid, tmp, &newFirstPage, op);
DEBUG("Tset: {%d, %d, %d} = %d\n", tmp.page, tmp.slot, tmp.size, newFirstPage);
}
tmp.slot = MAX_OFFSET_POSITION;
int newMaxOffset = tlp.maxOffset+slots;
/** @todo CORRECTNESS BUG: From recovery's point of view, arrayList is totally wrong! The
only reason we mess with p is beacuse TinstantSet doesn't handle
ARRAY_LIST_PAGES the way we need it to, so this won't be hard to
fix... */
*page_type_ptr(p) = FIXED_PAGE;
TinstantSet(xid, tmp, &newMaxOffset);
*page_type_ptr(p) = ARRAY_LIST_PAGE;
releasePage(p);
alTupdate(xid, tmp, &newMaxOffset, op);
} end_ret(compensation_error());
return 0;
}
compensated_function int TarrayListInstantExtend(int xid, recordid rid, int slots) {
return TarrayListExtendInternal(xid, rid, slots, OPERATION_INSTANT_SET);
}
compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
return TarrayListExtendInternal(xid, rid, slots, OPERATION_SET);
}
static int operateInitFixed(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
fixedPageInitialize(p, rid.size, recordsPerPage(rid.size));
pageWriteLSN(xid, p, lsn);
return 0;
}
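
Both public wrappers now funnel into TarrayListExtendInternal(), differing only in whether the slot updates are logged with OPERATION_SET or OPERATION_INSTANT_SET. A minimal caller sketch; the return convention (0 on success, a compensation error code otherwise) is inferred from the _ret macros above.

    int grow_list(int xid, recordid arrayList, int extraSlots) {
      if(TarrayListExtend(xid, arrayList, extraSlots)) {  /* nonzero: compensation error */
        return -1;
      }
      return 0;
    }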

View file

@ -70,7 +70,8 @@ int findInBucket(int xid, recordid hashRid, int bucket_number, const void * key,
int findInBucket(int xid, recordid hashRid, int bucket_number, const void * key, int keySize, void * val, int valSize) {
int found;
try_ret(compensation_error()) {
hashEntry * e = malloc(sizeof(hashEntry) + keySize + valSize);
recordid nextEntry;
@ -78,9 +79,10 @@ int findInBucket(int xid, recordid hashRid, int bucket_number, const void * key,
hashRid.slot = bucket_number;
nextEntry = hashRid;
int found = 0;
found = 0;
while(nextEntry.size != -1 && nextEntry.size != 0) {
if(compensation_error()) { break; }
assert(nextEntry.size == sizeof(hashEntry) + keySize + valSize);
Tread(xid, nextEntry, e);
if(!memcmp(key, e+1, keySize) && e->next.size != 0) {
@ -91,6 +93,8 @@ int findInBucket(int xid, recordid hashRid, int bucket_number, const void * key,
nextEntry = e->next;
}
free(e);
} end_ret(compensation_error());
return found;
}
@ -102,11 +106,12 @@ void expand (int xid, recordid hash, int next_split, int i, int keySize, int val
#define AMORTIZE 1000
#define FF_AM 750
if(count <= 0 && !(count * -1) % FF_AM) {
try {
recordid * headerRidB = pblHtLookup(openHashes, &(hash.page), sizeof(int));
int j;
TarrayListExtend(xid, hash, AMORTIZE);
for(j = 0; j < AMORTIZE; j++) {
if(compensation_error()) { break; }
if(next_split >= twoToThe(i-1)+2) {
i++;
next_split = 2;
@ -117,10 +122,12 @@ void expand (int xid, recordid hash, int next_split, int i, int keySize, int val
headerHashBits = i;
}
update_hash_header(xid, hash, i, next_split);
} end;
}
}
void update_hash_header(int xid, recordid hash, int i, int next_split) {
try {
hashEntry * he = pblHtLookup(openHashes, &(hash.page), sizeof(int));
assert(he);
recordid * headerRidB = &he->next;
@ -130,10 +137,13 @@ void update_hash_header(int xid, recordid hash, int i, int next_split) {
headerHashBits = i;
headerNextSplit = next_split;
hash.slot = 1;
Tset(xid, hash, headerRidB);
} end;
}
void rehash(int xid, recordid hashRid, int next_split, int i, int keySize, int valSize) {
try {
int firstA = 1; // Is 'A' the recordid of a bucket?
int firstD = 1; // What about 'D'?
@ -272,7 +282,7 @@ void rehash(int xid, recordid hashRid, int next_split, int i, int keySize, int v
free(D_contents);
free(A_contents);
free(B_contents);
} end;
}
void insertIntoBucket(int xid, recordid hashRid, int bucket_number, hashEntry * bucket_contents,
hashEntry * e, int keySize, int valSize, int skipDelete) {
@ -344,6 +354,7 @@ int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry * buck
memcpy(B, bucket_contents, sizeof(hashEntry) + keySize + valSize);
Baddr = this;
while(B->next.size != -1) {
if(compensation_error()) { break; } // guard the asserts below.
hashEntry * tmp = A;
A = B;
Aaddr = Baddr;
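
The if(compensation_error()) { break; } guards added above keep long loops from touching further records, and from tripping the asserts, once a nested compensated call has failed. A condensed sketch of the pattern with a hypothetical helper; assumes <stdlib.h> and the hashEntry layout local to linearHash.c.

    static void walk_chain(int xid, recordid head, int entrySize) {
      recordid next = head;
      hashEntry * e = malloc(entrySize);
      while(next.size != -1 && next.size != 0) {
        if(compensation_error()) { break; }  /* stop early; let the caller see the error */
        Tread(xid, next, e);
        next = e->next;
      }
      free(e);
    }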

View file

@ -191,7 +191,7 @@ compensated_function void pageOperationsInit() {
commit / abort. If other transactions need to allocate when the
lock is held, then they simply do not reuse pages. Since locking is
not yet implemented, we require applications to manually serialize
transactions that call Talloc() or TdeAlloc
transactions that call Talloc or Tdealloc
A better solution: defer the addition of 100 to the freelist until
commit, and use a 'real' data structure, like a concurrent B-Tree.

View file

@ -10,26 +10,30 @@ typedef struct {
short keySize;
} pagedListEntry;
recordid TpagedListAlloc(int xid) {
recordid ret = Talloc(xid, sizeof(pagedListHeader));
compensated_function recordid TpagedListAlloc(int xid) {
recordid ret;
try_ret(NULLRID) {
ret = Talloc(xid, sizeof(pagedListHeader));
pagedListHeader header;
header.thisPage = 0;
header.nextPage = NULLRID;
Tset(xid, ret, &header);
} end_ret(NULLRID);
return ret;
}
compensated_function int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize) {
int ret;
try_ret(compensation_error()) {
pagedListHeader header;
Tread(xid, list, &header);
recordid headerRid = list;
byte * garbage;
int ret = (TpagedListFind(xid, list, key, keySize, &garbage) != -1);
ret = (TpagedListFind(xid, list, key, keySize, &garbage) != -1);
if(ret) {
TpagedListRemove(xid, list, key, keySize);
free(garbage);
TpagedListRemove(xid, list, key, keySize);
}
int entrySize = sizeof(pagedListEntry) + keySize + valueSize;
@ -39,6 +43,7 @@ compensated_function int TpagedListInsert(int xid, recordid list, const byte * k
// When the loop completes, header will contain the contents of the page header the entry will be inserted into,
// headerrid will contain the rid of that header, and rid will contain the newly allocated recordid
while(rid.size == -1) {
if(compensation_error()) { break; }
if(header.nextPage.size == -1) {
header.nextPage = Talloc(xid, sizeof(pagedListHeader));
DEBUG("allocing on new page %d\n", header.nextPage.page);
@ -69,11 +74,12 @@ compensated_function int TpagedListInsert(int xid, recordid list, const byte * k
DEBUG("Header.thisPage = %d\n", rid.slot);
Tset(xid, headerRid, &header);
free(dat);
} end_ret(compensation_error());
return ret;
}
compensated_function int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value) {
try_ret(compensation_error()) {
pagedListHeader header;
Tread(xid, list, &header);
@ -82,10 +88,12 @@ compensated_function int TpagedListFind(int xid, recordid list, const byte * key
rid.slot = header.thisPage;
while(rid.slot || header.nextPage.size != -1) {
if(compensation_error()) { break; }
if(rid.slot) {
rid.size = TrecordSize(xid, rid);
pagedListEntry * dat;
if(compensation_error()) { break; }
dat = malloc(rid.size);
Tread(xid, rid, dat);
@ -106,15 +114,16 @@ compensated_function int TpagedListFind(int xid, recordid list, const byte * key
} else { // we've reached the end of the last page
rid.slot = 0;
}
}
} end_ret(compensation_error());
return -1;
}
compensated_function int TpagedListRemove(int xid, recordid list, const byte * key, int keySize) {
pagedListHeader header;
int ret = 0;
try_ret(compensation_error()) {
Tread(xid, list, &header);
recordid headerRid;
recordid rid;
@ -123,8 +132,10 @@ compensated_function int TpagedListRemove(int xid, recordid list, const byte * k
short lastSlot = -1;
headerRid = list;
while(rid.slot || header.nextPage.size != -1) {
if(compensation_error()) { break; }
if(rid.slot) {
rid.size = TrecordSize(xid, rid);
if(compensation_error()) { break; };
pagedListEntry * dat = malloc(rid.size);
Tread(xid, rid, dat);
@ -134,6 +145,7 @@ compensated_function int TpagedListRemove(int xid, recordid list, const byte * k
recordid lastRid = rid;
lastRid.slot = lastSlot;
lastRid.size = TrecordSize(xid, lastRid);
if(compensation_error()) { free(dat); break; }
pagedListEntry * lastRidBuf = malloc(lastRid.size);
Tread(xid, lastRid, lastRidBuf);
lastRidBuf->nextEntry = dat->nextEntry;
@ -145,14 +157,11 @@ compensated_function int TpagedListRemove(int xid, recordid list, const byte * k
}
Tdealloc(xid, rid);
free(dat);
return 1;
ret = 1;
break;
}
lastSlot = rid.slot;
rid.slot = dat->nextEntry;
// }
// if(dat->nextEntry) { // another entry on this page
// lastSlot = rid.slot;
// rid.slot = dat->nextEntry;
free(dat);
} else if (header.nextPage.size != -1) { // another page
lastSlot = -1;
@ -160,34 +169,37 @@ compensated_function int TpagedListRemove(int xid, recordid list, const byte * k
headerRid = header.nextPage;
Tread(xid, header.nextPage, &header);
rid.slot = header.thisPage;
// } else { // we've reached the end of the last page
// rid.slot = 0;
}
// free(dat);
}
return 0;
} end_ret(compensation_error());
return ret;
}
compensated_function int TpagedListMove(int xid, recordid start_list, recordid end_list, const byte * key, int keySize) {
byte * value;
byte * value = NULL;
int ret;
try_ret(compensation_error()) {
int valueSize = TpagedListFind(xid, start_list, key, keySize, &value);
if(valueSize != -1) {
int ret = TpagedListRemove(xid, start_list, key, keySize);
assert(ret);
ret = TpagedListInsert(xid, end_list, key, keySize, value, valueSize);
assert(!ret);
free(value);
return 1;
if(value) { free(value); }
ret = 1;
} else {
return 0;
ret = 0;
}
} end_ret(compensation_error());
return ret;
}
lladd_pagedList_iterator * TpagedListIterator(int xid, recordid list) {
compensated_function lladd_pagedList_iterator * TpagedListIterator(int xid, recordid list) {
pagedListHeader header;
try_ret(NULL) {
Tread(xid, list, &header);
} end_ret(NULL);
lladd_pagedList_iterator * it = malloc(sizeof(lladd_pagedList_iterator));
it->headerRid = header.nextPage;
@ -198,17 +210,20 @@ lladd_pagedList_iterator * TpagedListIterator(int xid, recordid list) {
return it;
}
int TpagedListNext(int xid, lladd_pagedList_iterator * it,
compensated_function int TpagedListNext(int xid, lladd_pagedList_iterator * it,
byte ** key, int * keySize,
byte ** value, int * valueSize) {
while(it->entryRid.slot || it->headerRid.size != -1) {
if(it->entryRid.slot) {
try_ret(compensation_error()) {
it->entryRid.size = TrecordSize(xid, it->entryRid);
} end_ret(compensation_error());
assert(it->entryRid.size != -1);
pagedListEntry * entry = malloc(it->entryRid.size);
begin_action_ret(free, entry, compensation_error()) {
Tread(xid, it->entryRid, entry);
} end_action_ret(compensation_error());
*keySize = entry->keySize;
*valueSize = it->entryRid.size - *keySize - sizeof(pagedListEntry);
@ -227,7 +242,9 @@ int TpagedListNext(int xid, lladd_pagedList_iterator * it,
} else { // move to next page.
pagedListHeader header;
try_ret(compensation_error()) {
Tread(xid, it->headerRid, &header);
} end_ret(compensation_error());
it->entryRid.page = it->headerRid.page;
it->headerRid = header.nextPage;
it->entryRid.slot = header.thisPage;
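
A hedged usage sketch for the operations above. The return conventions are taken from the code: TpagedListFind() returns the value size (or -1) and hands back malloc()ed memory, and TpagedListInsert() replaces any existing binding for the key. The include paths are assumptions.

    #include <stdlib.h>
    #include <lladd/transactional.h>

    int upsert_int(int xid, recordid list, const byte * key, int keySize, int value) {
      TpagedListInsert(xid, list, key, keySize, (const byte*)&value, sizeof(value));
      if(compensation_error()) { return -1; }
      byte * stored = NULL;
      int size = TpagedListFind(xid, list, key, keySize, &stored);
      if(size != (int)sizeof(value)) { return -1; }
      free(stored);                       /* Find hands back malloc()ed memory */
      return 0;
    }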

View file

@ -216,7 +216,6 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
}
compensated_function recordid slottedPreRallocFromPage(int xid, long page, long size, Page **pp) {
recordid ret;
int isBlob = 0;
if(size == BLOB_SLOT) {
isBlob = 1;
@ -235,7 +234,8 @@ compensated_function recordid slottedPreRallocFromPage(int xid, long page, long
slottedPageInitialize(*pp);
}
assert(*page_type_ptr(*pp) == SLOTTED_PAGE);
ret = slottedRawRalloc(*pp, size);
recordid ret = slottedRawRalloc(*pp, size);
assert(ret.size == size);
if(isBlob) {
*slot_length_ptr(*pp, ret.slot) = BLOB_SLOT;
}

View file

@ -92,7 +92,9 @@ int Tinit() {
openLogWriter();
try_ret(compensation_error()) {
pageOperationsInit();
} end_ret(compensation_error());
initNestedTopActions();
ThashInit();
@ -144,20 +146,43 @@ int Tbegin() {
return XactionTable[index].xid;
}
compensated_function void Tupdate(int xid, recordid rid, const void *dat, int op) {
static compensated_function void TupdateHelper(int xid, recordid rid, const void * dat, int op, Page * p) {
LogEntry * e;
try {
if(globalLockManager.writeLockPage) {
globalLockManager.writeLockPage(xid, rid.page);
}
} end;
e = LogUpdate(&XactionTable[xid % MAX_TRANSACTIONS], p, rid, op, dat);
assert(XactionTable[xid % MAX_TRANSACTIONS].prevLSN == e->LSN);
DEBUG("T update() e->LSN: %ld\n", e->LSN);
doUpdate(e, p);
free(e);
}
compensated_function void Tupdate(int xid, recordid rid, const void *dat, int op) {
Page * p;
#ifdef DEBUGGING
pthread_mutex_lock(&transactional_2_mutex);
assert(numActiveXactions <= MAX_TRANSACTIONS);
pthread_mutex_unlock(&transactional_2_mutex);
#endif
try {
p = loadPage(xid, rid.page);
} end;
if(*page_type_ptr(p) == INDIRECT_PAGE) {
releasePage(p);
try {
rid = dereferenceRID(xid, rid);
p = loadPage(xid, rid.page);
} end;
/** @todo Kludge! Shouldn't special case operations in transactional2. */
} else if(*page_type_ptr(p) == ARRAY_LIST_PAGE &&
op != OPERATION_LINEAR_INSERT &&
@ -166,85 +191,66 @@ compensated_function void Tupdate(int xid, recordid rid, const void *dat, int op
op != OPERATION_UNDO_LINEAR_DELETE ) {
rid = dereferenceArrayListRid(p, rid.slot);
releasePage(p);
try {
p = loadPage(xid, rid.page);
} end;
}
/** @todo For logical undo logs, grabbing a lock makes no sense! */
try {
if(globalLockManager.writeLockPage) {
begin_action(releasePage, p) {
TupdateHelper(xid, rid, dat, op, p);
/* if(globalLockManager.writeLockPage) {
globalLockManager.writeLockPage(xid, rid.page);
}
} end;
e = LogUpdate(&XactionTable[xid % MAX_TRANSACTIONS], p, rid, op, dat);
} end_action;
assert(XactionTable[xid % MAX_TRANSACTIONS].prevLSN == e->LSN);
DEBUG("Tupdate() e->LSN: %ld\n", e->LSN);
doUpdate(e, p);
releasePage(p);
free(e);
releasePage(p);*/
} compensate;
}
compensated_function void alTupdate(int xid, recordid rid, const void *dat, int op) {
LogEntry * e;
Page * p;
Page * p ;
try {
if(globalLockManager.writeLockPage) {
globalLockManager.writeLockPage(xid, rid.page);
}
p = loadPage(xid, rid.page);
} end;
p = loadPage(xid, rid.page);
begin_action(releasePage, p) {
TupdateHelper(xid, rid, dat, op, p);
} compensate;
/* if(*page_type_ptr(p) == INDIRECT_PAGE) {
releasePage(p);
rid = dereferenceRID(rid);
p = loadPage(rid.page);
/ ** @todo Kludge! Shouldn't special case operations in transactional2. * /
} else if(*page_type_ptr(p) == ARRAY_LIST_PAGE &&
op != OPERATION_LINEAR_INSERT &&
op != OPERATION_UNDO_LINEAR_INSERT &&
op != OPERATION_LINEAR_DELETE &&
op != OPERATION_UNDO_LINEAR_DELETE ) {
rid = dereferenceArrayListRid(p, rid.slot);
releasePage(p);
p = loadPage(rid.page);
} */
e = LogUpdate(&XactionTable[xid % MAX_TRANSACTIONS], p, rid, op, dat);
assert(XactionTable[xid % MAX_TRANSACTIONS].prevLSN == e->LSN);
DEBUG("Tupdate() e->LSN: %ld\n", e->LSN);
doUpdate(e, p);
releasePage(p);
/* end Tupdate() */
free(e);
}
void TreadUnlocked(int xid, recordid rid, void * dat) {
Page * p = loadPage(xid, rid.page);
Page * p;
try {
p = loadPage(xid, rid.page);
} end;
int page_type = *page_type_ptr(p);
if(page_type == SLOTTED_PAGE || page_type == FIXED_PAGE || !page_type ) {
} else if(page_type == INDIRECT_PAGE) {
releasePage(p);
try {
rid = dereferenceRIDUnlocked(xid, rid);
p = loadPage(xid, rid.page);
} end;
} else if(page_type == ARRAY_LIST_PAGE) {
rid = dereferenceArrayListRidUnlocked(p, rid.slot);
releasePage(p);
try {
p = loadPage(xid, rid.page);
} end;
} else {
abort();
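
The public entry points above now route page loading, locking, and logging through TupdateHelper(). For orientation, an end-to-end caller sketch: Tinit() and Tbegin() appear in this file, while Tcommit() and Tdeinit() are assumed from the rest of the library and are not part of this diff.

    #include <lladd/transactional.h>    /* assumed include path */

    int main(void) {
      Tinit();
      int xid = Tbegin();
      recordid rid = Talloc(xid, sizeof(int));
      int v = 42;
      Tset(xid, rid, &v);              /* expected to end up in Tupdate()/TupdateHelper() */
      Tcommit(xid);                    /* assumed API, as is Tdeinit() below */
      Tdeinit();
      return 0;
    }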

View file

@ -19,17 +19,17 @@ int main(int argc, char ** argv) {
// cht_evals go here...
int xid = 1;//NULL_MACHINE; // What's the deal with this? ;)
clusterHashTable_t * new_ht;
cHtCreate(xid, cht_client, new_ht);
clusterHashTable_t new_ht;
cHtCreate(xid, cht_client, &new_ht);
int i;
for(i = 0; i < 10000; i++) {
int one = i; int two = i+1;
cHtInsert(xid, cht_client, new_ht, &one, sizeof(int), &two, sizeof(int));
cHtInsert(xid, cht_client, &new_ht, &one, sizeof(int), &two, sizeof(int));
int newOne, newTwo;
newOne = i;
newTwo = 0;
unsigned int newLen = sizeof(int);
int ret = cHtLookup(xid, cht_client, new_ht, &newOne, sizeof(int), &newTwo, &newLen);
int ret = cHtLookup(xid, cht_client, &new_ht, &newOne, sizeof(int), &newTwo, &newLen);
// xid++;
//printf("lookup returned %d (%d->%d)\n", ret, newOne, newTwo);
assert(ret);
@ -42,19 +42,19 @@ int main(int argc, char ** argv) {
for(i = 0; i < 10000; i+=10) {
int one = i; int two = -1;
unsigned int size = sizeof(int);
int removed = cHtRemove(xid, cht_client, new_ht, &one, sizeof(int), &two, &size);
int removed = cHtRemove(xid, cht_client, &new_ht, &one, sizeof(int), &two, &size);
assert(removed);
size = sizeof(int);
two = -1;
removed = cHtRemove(xid, cht_client, new_ht, &one, sizeof(int), &two, &size);
removed = cHtRemove(xid, cht_client, &new_ht, &one, sizeof(int), &two, &size);
assert(!removed);
int newOne, newTwo;
newOne = i;
newTwo = 0;
unsigned int newLen = sizeof(int);
int ret = cHtLookup(xid, cht_client, new_ht, &newOne, sizeof(int), &newTwo, &newLen);
int ret = cHtLookup(xid, cht_client, &new_ht, &newOne, sizeof(int), &newTwo, &newLen);
assert(!ret);
@ -68,7 +68,7 @@ int main(int argc, char ** argv) {
newOne = i;
newTwo = 0;
unsigned int newLen = sizeof(int);
int ret = cHtLookup(xid, cht_client, new_ht, &newOne, sizeof(int), &newTwo, &newLen);
int ret = cHtLookup(xid, cht_client, &new_ht, &newOne, sizeof(int), &newTwo, &newLen);
// xid++;
//printf("lookup returned %d (%d->%d)\n", ret, newOne, newTwo);
assert(ret);
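
The fix above replaces an uninitialized clusterHashTable_t pointer with a caller-owned struct whose address is passed to the cHt* calls. Condensed, the corrected calling convention looks like this; the type of cht_client and the cht.h signatures are assumptions taken from the test, not shown in this diff.

    static void smoke_test(int xid, DfaSet * cht_client) {  /* cht_client type assumed */
      clusterHashTable_t table;         /* caller-owned storage, not a dangling pointer */
      cHtCreate(xid, cht_client, &table);
      int k = 1, v = 2;
      cHtInsert(xid, cht_client, &table, &k, sizeof(k), &v, sizeof(v));
      int out = 0;
      unsigned int len = sizeof(int);
      int found = cHtLookup(xid, cht_client, &table, &k, sizeof(k), &out, &len);
      assert(found && out == 2);
    }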