diff --git a/libdfa/rw.h b/libdfa/rw.h index 498c5a4..41d473f 100644 --- a/libdfa/rw.h +++ b/libdfa/rw.h @@ -20,7 +20,6 @@ #include #include #include - #ifndef __LIBDFA_RW_H #define __LIBDFA_RW_H typedef struct { diff --git a/lladd/bufferManager.h b/lladd/bufferManager.h index 88ab509..a4f4756 100644 --- a/lladd/bufferManager.h +++ b/lladd/bufferManager.h @@ -100,7 +100,7 @@ typedef struct Page_s Page; * @param pageid ID of the page you want to load * @return fully formed Page type */ -Page * loadPage(int xid, int pageid); +compensated_function Page * loadPage(int xid, int pageid); /** loadPage aquires a lock when it is called, effectively pinning it diff --git a/lladd/common.h b/lladd/common.h index 6161943..4ffe52b 100644 --- a/lladd/common.h +++ b/lladd/common.h @@ -112,6 +112,8 @@ extern int errno; #else #define DEBUG(...) +#include "compensations.h" + #endif /*DEBUGGING*/ #endif /* __lladd_common_h */ diff --git a/lladd/compensations.h b/lladd/compensations.h index c98e7e2..0503abc 100644 --- a/lladd/compensations.h +++ b/lladd/compensations.h @@ -97,7 +97,8 @@ void lock_c_line_1231(lock * l) { properly. Also, begin_action(NULL, NULL) is supported, and is useful for - checking the return value of a called function. + checking the return value of a called function, but, for + efficiency, try{ } end; is recommended */ void compensations_init(); @@ -106,13 +107,19 @@ int compensation_error(); void compensation_clear_error(); void compensation_set_error(int code); +#define try do { if(compensation_error()) return; do +#define try_ret(x) do { if(compensation_error()) return (x); do + +#define end while(0); if(compensation_error()) return; }while(0) +#define end_ret(x) while(0); if(compensation_error()) return (x); }while(0) + #define begin_action(func, var) \ if(compensation_error()) return; \ do{ \ void (*_func_)(void*); \ pthread_cleanup_push(_func_=(void(*)(void*))(func), (void*)(var));\ do - +/** @todo compensation variables don't need _func_ anymore. */ #define end_action \ while(0); \ pthread_cleanup_pop(_func_ && compensation_error()); \ diff --git a/lladd/constants.h b/lladd/constants.h index 3696390..da7cc20 100644 --- a/lladd/constants.h +++ b/lladd/constants.h @@ -173,4 +173,21 @@ terms specified in this license. 
#define XEND 6 #define CLRLOG 7 +/* Page types */ + +#define UNINITIALIZED_PAGE 0 +#define SLOTTED_PAGE 1 +#define INDIRECT_PAGE 2 +#define LLADD_HEADER_PAGE 3 +#define LLADD_FREE_PAGE 4 +#define FIXED_PAGE 5 +#define ARRAY_LIST_PAGE 6 + +/* Record types */ + +#define UNINITIALIZED_RECORD 0 +#define BLOB_RECORD 1 +#define SLOTTED_RECORD 2 +#define FIXED_RECORD 3 + #endif diff --git a/lladd/lockManager.h b/lladd/lockManager.h index 9ba461f..9824dfb 100644 --- a/lladd/lockManager.h +++ b/lladd/lockManager.h @@ -17,8 +17,8 @@ extern LockManagerSetup globalLockManager; void lockManagerInit(); -int lockManagerReadLockRecord(int xid, recordid rid); -int lockManagerWriteLockRecord(int xid, recordid rid); +compensated_function int lockManagerReadLockRecord(int xid, recordid rid); +compensated_function int lockManagerWriteLockRecord(int xid, recordid rid); int lockManagerUnlockRecord(int xid, recordid rid); int lockManagerCommit(int xid); diff --git a/lladd/operations/linearHashNTA.h b/lladd/operations/linearHashNTA.h index 523f7c8..0921e81 100644 --- a/lladd/operations/linearHashNTA.h +++ b/lladd/operations/linearHashNTA.h @@ -84,6 +84,10 @@ lladd_hash_iterator * ThashIterator(int xid, recordid hash, int keySize, int val */ int ThashNext(int xid, lladd_hash_iterator * it, byte ** key, int * keySize, byte** value, int * valueSize); + +/** Free the hash iterator and its associated resources. */ +void ThashDone(int xid, lladd_hash_iterator * it); + Operation getLinearHashInsert(); Operation getLinearHashRemove(); diff --git a/lladd/operations/set.h b/lladd/operations/set.h index 5baa0d6..0201ef1 100644 --- a/lladd/operations/set.h +++ b/lladd/operations/set.h @@ -74,7 +74,7 @@ Operation getSetRange(); efficiently, it performs a number of extra memcpy() calls over the entire record. */ -void TsetRange(int xid, recordid rid, int offset, int length, const void * dat); +compensated_function void TsetRange(int xid, recordid rid, int offset, int length, const void * dat); #endif diff --git a/lladd/transactional.h b/lladd/transactional.h index 56d7a19..095efd5 100644 --- a/lladd/transactional.h +++ b/lladd/transactional.h @@ -140,7 +140,7 @@ terms specified in this license. In order to facilitate this, LLADD provides the function TgetRecordType() and guarantess that the first recordid returned by any allocation will point to the same page and slot as the constant ROOT_RECORD. TgetRecordType - will return NULL_RECORD if the record passed to it does not exist. + will return NULLRID if the record passed to it does not exist. 
Therefore, the following code will safely initialize or reopen a data store: @@ -262,8 +262,8 @@ typedef struct { long size; } recordid; -extern const recordid ZERO_RID; - +extern const recordid ROOT_RECORD; +extern const recordid NULLRID; /** If a recordid's slot field is set to this, then the recordid @@ -379,6 +379,7 @@ void Trevive(int xid, long lsn); */ void TsetXIDCount(int xid); + END_C_DECLS #endif diff --git a/src/apps/cht/cht.h b/src/apps/cht/cht.h index aacf122..6d93800 100644 --- a/src/apps/cht/cht.h +++ b/src/apps/cht/cht.h @@ -141,7 +141,7 @@ DfaSet * cHtCoordinatorInit(char * config_file, short(*get_broadcast_group)(DfaS DfaSet * cHtSubordinateInit(char * config_file, short(*get_broadcast_group)(DfaSet *, Message *), int subordinate_number); int cHtGetXid(state_machine_id* xid, DfaSet * dfaSet); int cHtCommit(state_machine_id xid, DfaSet * dfaSet); -/* int cHtAbort(state_machine_id xid, DfaSet * dfaSet);*/ +int cHtAbort(state_machine_id xid, DfaSet * dfaSet); /** The server side state for a CHT. */ typedef struct { diff --git a/src/lladd/bufferManager.c b/src/lladd/bufferManager.c index 558d35c..5e91543 100644 --- a/src/lladd/bufferManager.c +++ b/src/lladd/bufferManager.c @@ -267,9 +267,10 @@ Page * getPage(int pageid, int locktype) { return ret; } -Page *loadPage(int xid, int pageid) { - if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); } - +compensated_function Page *loadPage(int xid, int pageid) { + try_ret(NULL) { + if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); } + } end_ret(NULL); Page * ret = getPage(pageid, RO); return ret; } diff --git a/src/lladd/lockManager.c b/src/lladd/lockManager.c index 9ab78a4..9a0b681 100644 --- a/src/lladd/lockManager.c +++ b/src/lladd/lockManager.c @@ -186,7 +186,7 @@ int lockManagerWriteLockHashed(int xid, byte * dat, int datLen) { ts.tv_nsec = tv.tv_usec * 1000; if(tod_ret != 0) { perror("Could not get time of day"); - compensation_set_error(LLADD_DEADLOCK); + compensation_set_error(LLADD_INTERNAL_ERROR); return LLADD_INTERNAL_ERROR; } while(ridLock->writers || (ridLock->readers - me)) { @@ -301,10 +301,10 @@ int lockManagerCommitHashed(int xid, int datLen) { return ret; } -int lockManagerReadLockRecord(int xid, recordid rid) { +compensated_function int lockManagerReadLockRecord(int xid, recordid rid) { return lockManagerReadLockHashed(xid, (byte*)&rid, sizeof(recordid)); } -int lockManagerWriteLockRecord(int xid, recordid rid) { +compensated_function int lockManagerWriteLockRecord(int xid, recordid rid) { return lockManagerWriteLockHashed(xid, (byte*)&rid, sizeof(recordid)); } int lockManagerUnlockRecord(int xid, recordid rid) { @@ -314,10 +314,10 @@ int lockManagerCommitRecords(int xid) { return lockManagerCommitHashed(xid, sizeof(recordid)); } -int lockManagerReadLockPage(int xid, int p) { +compensated_function int lockManagerReadLockPage(int xid, int p) { return lockManagerReadLockHashed(xid, (byte*)&p, sizeof(int)); } -int lockManagerWriteLockPage(int xid, int p) { +compensated_function int lockManagerWriteLockPage(int xid, int p) { return lockManagerWriteLockHashed(xid, (byte*)&p, sizeof(int)); } int lockManagerUnlockPage(int xid, int p) { diff --git a/src/lladd/operations/linearHash.c b/src/lladd/operations/linearHash.c index e191e8e..890db3e 100644 --- a/src/lladd/operations/linearHash.c +++ b/src/lladd/operations/linearHash.c @@ -323,8 +323,6 @@ static void recover_split(int xid, recordid hashRid, int i, int next_split, int hashRid.slot = 0; recordid ba = 
hashRid; ba.slot = next_split; recordid bb = hashRid; bb.slot = next_split + twoToThe(i-1); - recordid NULLRID; NULLRID.page = 0; NULLRID.slot=0; NULLRID.size = -1; - if(headerHashBits <= i && headerNextSplit <= next_split) { @@ -416,7 +414,6 @@ void instant_rehash(int xid, recordid hashRid, int next_split, int i, int keySiz assert(hashRid.size == sizeof(hashEntry) + keySize + valSize); recordid ba = hashRid; ba.slot = next_split; recordid bb = hashRid; bb.slot = next_split + twoToThe(i-1); - recordid NULLRID; NULLRID.page = 0; NULLRID.slot=0; NULLRID.size = -1; // recordid ba_contents; TreadUnlocked(xid, ba, &ba_contents); // recordid bb_contents = NULLRID; @@ -821,21 +818,19 @@ int TlogicalHashLookup(int xid, recordid hashRid, void * key, int keySize, void linearHash_iterator * TlogicalHashIterator(int xid, recordid hashRid) { - recordid NULLRID; NULLRID.page = 0; NULLRID.slot=2; NULLRID.size = -1; linearHash_iterator * ret = malloc(sizeof(linearHash_iterator)); ret->current_hashBucket = 0; ret->current_rid = NULLRID; + ret->current_rid.slot = 2; return ret; } void TlogicalHashIteratorFree(linearHash_iterator * it) { free(it); } linearHash_iteratorPair TlogicalHashIteratorNext(int xid, recordid hashRid, linearHash_iterator * it, int keySize, int valSize) { - recordid NULLRID; NULLRID.page = 0; NULLRID.slot=2; NULLRID.size = -1; recordid * headerRidB = pblHtLookup(openHashes, &hashRid.page, sizeof(int)); hashEntry * e = malloc(sizeof(hashEntry) + keySize + valSize); - linearHash_iteratorPair p;// = malloc(sizeof(linearHash_iteratorPair)); //next.size == 0 -> empty bucket. == -1 -> end of list. @@ -862,6 +857,7 @@ linearHash_iteratorPair TlogicalHashIteratorNext(int xid, recordid hashRid, line p.key = NULL; p.value = NULL; it->current_rid = NULLRID; + it->current_rid.slot = 2; // memcpy(&(it->current_rid), &(NULLRID), sizeof(recordid)); it->current_hashBucket = 0; } else { diff --git a/src/lladd/operations/linearHashNTA.c b/src/lladd/operations/linearHashNTA.c index 63cf945..75bb2bc 100644 --- a/src/lladd/operations/linearHashNTA.c +++ b/src/lladd/operations/linearHashNTA.c @@ -371,3 +371,13 @@ int ThashNext(int xid, lladd_hash_iterator * it, byte ** key, int * keySize, byt } return 1; } + +void ThashDone(int xid, lladd_hash_iterator * it) { + if(it->it) { + free(it->it); + } + if(it->pit) { + free(it->pit); + } + free(it); +} diff --git a/src/lladd/operations/linkedListNTA.c b/src/lladd/operations/linkedListNTA.c index e2dfb03..c61af13 100644 --- a/src/lladd/operations/linkedListNTA.c +++ b/src/lladd/operations/linkedListNTA.c @@ -3,9 +3,18 @@ #include #include #include -#define __USE_GNU + +#define __USE_GNU #include +/*** @todo this is a big hack but it seems necessary to work with + gcc when the moon is full. (thanks mike demmer. ;) */ +#ifndef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP +# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \ + {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER} +#endif + + /** A quick note on the format of linked lists. Each entry consists of a struct with some variable length data appended to it. 
@@ -31,7 +40,9 @@ @file */ + static pthread_mutex_t linked_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; + static void __TlinkedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize); static int __TlinkedListRemove(int xid, recordid list, const byte * key, int keySize); typedef struct { diff --git a/src/lladd/operations/naiveLinearHash.c b/src/lladd/operations/naiveLinearHash.c index 69b59cb..4b50765 100644 --- a/src/lladd/operations/naiveLinearHash.c +++ b/src/lladd/operations/naiveLinearHash.c @@ -140,7 +140,6 @@ void rehash(int xid, recordid hashRid, int next_split, int i, int keySize, int v assert(hashRid.size == sizeof(hashEntry) + keySize + valSize); recordid ba = hashRid; ba.slot = next_split; recordid bb = hashRid; bb.slot = next_split + twoToThe(i-1); - recordid NULLRID; NULLRID.page = 0; NULLRID.slot=0; NULLRID.size = -1; hashEntry * D_contents = calloc(1,sizeof(hashEntry) + keySize + valSize); hashEntry * A_contents = calloc(1,sizeof(hashEntry) + keySize + valSize); diff --git a/src/lladd/operations/nestedTopActions.c b/src/lladd/operations/nestedTopActions.c index d8d51c4..d274cfd 100644 --- a/src/lladd/operations/nestedTopActions.c +++ b/src/lladd/operations/nestedTopActions.c @@ -64,13 +64,9 @@ extern TransactionLog XactionTable[]; pblHashTable_t * nestedTopActions = NULL; /** @todo this really should be set somewhere globally. */ -static recordid NULLRID; void initNestedTopActions() { nestedTopActions = pblHtCreate(); - NULLRID.page = 0; - NULLRID.slot = 0; - NULLRID.size = -1; } /** @todo TbeginNestedTopAction's API might not be quite right. Are there cases where we need to pass a recordid in? diff --git a/src/lladd/operations/pageOrientedListNTA.c b/src/lladd/operations/pageOrientedListNTA.c index 5abbb79..a0d5b56 100644 --- a/src/lladd/operations/pageOrientedListNTA.c +++ b/src/lladd/operations/pageOrientedListNTA.c @@ -12,11 +12,6 @@ typedef struct { recordid TpagedListAlloc(int xid) { - recordid NULLRID; - NULLRID.page = 0; - NULLRID.slot = 0; - NULLRID.size = -1; - recordid ret = Talloc(xid, sizeof(pagedListHeader)); pagedListHeader header; header.thisPage = 0; diff --git a/src/lladd/operations/set.c b/src/lladd/operations/set.c index b3605f6..6d1ccf2 100644 --- a/src/lladd/operations/set.c +++ b/src/lladd/operations/set.c @@ -95,7 +95,12 @@ static int deOperateRange(int xid, Page * p, lsn_t lsn, recordid rid, const void free(tmp); return 0; } -void TsetRange(int xid, recordid rid, int offset, int length, const void * dat) { +compensated_function void TsetRange(int xid, recordid rid, int offset, int length, const void * dat) { + Page * p; + + try { + p = loadPage(xid, rid.page); + } end; set_range_t * range = malloc(sizeof(set_range_t) + 2 * length); byte * record = malloc(rid.size); @@ -106,7 +111,6 @@ void TsetRange(int xid, recordid rid, int offset, int length, const void * dat) // Copy new value into log structure memcpy(range + 1, dat, length); - Page * p = loadPage(xid, rid.page); // No further locking is necessary here; readRecord protects the // page layout, but attempts at concurrent modification have undefined // results. (See page.c) diff --git a/src/lladd/page.c b/src/lladd/page.c index fb4bd19..9b93295 100644 --- a/src/lladd/page.c +++ b/src/lladd/page.c @@ -107,11 +107,11 @@ void pageWriteLSN(int xid, Page * page, lsn_t lsn) { /* unlocked since we're only called by a function that holds the writelock. 
*/ /* *(long *)(page->memAddr + START_OF_LSN) = page->LSN; */ - begin_action(NULL,NULL) { + try { if(globalLockManager.writeLockPage) { globalLockManager.writeLockPage(xid, page->id); } - } end_action; + } end; if(page->LSN < lsn) { page->LSN = lsn; diff --git a/src/lladd/page.h b/src/lladd/page.h index dddedde..24e5293 100644 --- a/src/lladd/page.h +++ b/src/lladd/page.h @@ -93,13 +93,6 @@ terms specified in this license. BEGIN_C_DECLS -#define UNINITIALIZED_PAGE 0 -#define SLOTTED_PAGE 1 -#define INDIRECT_PAGE 2 -#define LLADD_HEADER_PAGE 3 -#define LLADD_FREE_PAGE 4 -#define FIXED_PAGE 5 -#define ARRAY_LIST_PAGE 6 #define lsn_ptr(page) (((lsn_t *)(&((page)->memAddr[PAGE_SIZE])))-1) #define page_type_ptr(page) (((int*)lsn_ptr((page)))-1) #define end_of_usable_space_ptr(page) page_type_ptr((page)) @@ -111,10 +104,10 @@ BEGIN_C_DECLS #define USABLE_SIZE_OF_PAGE (PAGE_SIZE - sizeof(lsn_t) - sizeof(int)) -#define UNINITIALIZED_RECORD 0 +/*#define UNINITIALIZED_RECORD 0 #define BLOB_RECORD 1 #define SLOTTED_RECORD 2 -#define FIXED_RECORD 3 +#define FIXED_RECORD 3 */ @@ -221,7 +214,7 @@ void pageDeInit(); * @param page You must have a writelock on page before calling this function. * @param lsn The new lsn of the page. If the new lsn is less than the page's current lsn, then the page's lsn will not be changed. */ -void pageWriteLSN(int xid, Page * page, lsn_t lsn); +compensated_function void pageWriteLSN(int xid, Page * page, lsn_t lsn); /** * assumes that the page is already loaded in memory. It takes diff --git a/src/lladd/page/indirect.c b/src/lladd/page/indirect.c index c1c1d4a..61330fe 100644 --- a/src/lladd/page/indirect.c +++ b/src/lladd/page/indirect.c @@ -14,8 +14,11 @@ void indirectInitialize(Page * p, int height) { memset(p->memAddr, INVALID_SLOT, ((int)level_ptr(p)) - ((int)p->memAddr)); } /** @todo locking for dereferenceRID? */ -recordid dereferenceRID(int xid, recordid rid) { - Page * this = loadPage(xid, rid.page); +compensated_function recordid dereferenceRID(int xid, recordid rid) { + Page * this; + try_ret(NULLRID) { + this = loadPage(xid, rid.page); + } end_ret(NULLRID); int offset = 0; int max_slot; while(*page_type_ptr(this) == INDIRECT_PAGE) { @@ -32,7 +35,9 @@ recordid dereferenceRID(int xid, recordid rid) { int nextPage = *page_ptr(this, i); releasePage(this); - this = loadPage(xid, nextPage); + try_ret(NULLRID) { + this = loadPage(xid, nextPage); + } end_ret(NULLRID); } rid.page = this->id; @@ -168,8 +173,11 @@ recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount) return rid; } -unsigned int indirectPageRecordCount(int xid, recordid rid) { - Page * p = loadPage(xid, rid.page); +compensated_function int indirectPageRecordCount(int xid, recordid rid) { + Page * p; + try_ret(-1){ + p = loadPage(xid, rid.page); + }end_ret(-1); int i = 0; unsigned int ret; if(*page_type_ptr(p) == INDIRECT_PAGE) { @@ -193,7 +201,7 @@ unsigned int indirectPageRecordCount(int xid, recordid rid) { } } else { - printf("Unknown page type in indirectPageRecordCount()\n"); + printf("Unknown page type in indirectPageRecordCount\n"); abort(); } releasePage(p); diff --git a/src/lladd/page/indirect.h b/src/lladd/page/indirect.h index 0bda21b..0d25a07 100644 --- a/src/lladd/page/indirect.h +++ b/src/lladd/page/indirect.h @@ -43,11 +43,12 @@ BEGIN_C_DECLS Translates a recordid that points to an indirect block into the physical location of the record. 
*/ -recordid dereferenceRID(int xid, recordid rid); -#define dereferenceRIDUnlocked(y, x) dereferenceRID((y), (x)) +compensated_function recordid dereferenceRID(int xid, recordid rid); +compensated_function static inline recordid dereferenceRIDUnlocked(int xid, recordid rid) {return dereferenceRID(xid,rid);} +//#define dereferenceRIDUnlocked(y, x) dereferenceRID((y), (x)) void indirectInitialize(Page * p, int height); recordid rallocMany(/*int parentPage, lsn_t lsn,*/int xid, int recordSize, int recordCount); -unsigned int indirectPageRecordCount(int xid, recordid rid); +compensated_function int indirectPageRecordCount(int xid, recordid rid); END_C_DECLS diff --git a/src/lladd/page/slotted.c b/src/lladd/page/slotted.c index bb11d1e..5241765 100644 --- a/src/lladd/page/slotted.c +++ b/src/lladd/page/slotted.c @@ -156,8 +156,12 @@ int slottedFreespace(Page * page) { interface? (The xid is there for now, in case it allows some optimizations later. Perhaps it's better to cluster allocations from the same xid on the same page, or something...) + + @todo slottedPreRalloc should understand deadlock, and try another page if deadlock occurs. + + @todo need to obtain (transaction-level) write locks _before_ writing log entries. Otherwise, we can deadlock at recovery. */ -recordid slottedPreRalloc(int xid, long size, Page ** pp) { +compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) { recordid ret; @@ -175,18 +179,24 @@ recordid slottedPreRalloc(int xid, long size, Page ** pp) { if(lastFreepage == -1) { lastFreepage = TpageAlloc(xid); - *pp = loadPage(xid, lastFreepage); + try_ret(NULLRID) { + *pp = loadPage(xid, lastFreepage); + } end_ret(NULLRID); assert(*page_type_ptr(*pp) == UNINITIALIZED_PAGE); slottedPageInitialize(*pp); } else { - *pp = loadPage(xid, lastFreepage); + try_ret(NULLRID) { + *pp = loadPage(xid, lastFreepage); + } end_ret(NULLRID); } if(slottedFreespace(*pp) < size ) { releasePage(*pp); lastFreepage = TpageAlloc(xid); - *pp = loadPage(xid, lastFreepage); + try_ret(NULLRID) { + *pp = loadPage(xid, lastFreepage); + } end_ret(NULLRID); slottedPageInitialize(*pp); } @@ -201,24 +211,20 @@ recordid slottedPreRalloc(int xid, long size, Page ** pp) { return ret; } -recordid slottedPreRallocFromPage(int xid, long page, long size, Page **pp) { +compensated_function recordid slottedPreRallocFromPage(int xid, long page, long size, Page **pp) { recordid ret; int isBlob = 0; if(size == BLOB_SLOT) { isBlob = 1; size = sizeof(blob_record_t); } - - *pp = loadPage(xid, page); - + try_ret(NULLRID) { + *pp = loadPage(xid, page); + } end_ret(NULLRID); if(slottedFreespace(*pp) < size) { releasePage(*pp); *pp = NULL; - recordid rid; - rid.page = 0; - rid.slot = 0; - rid.size = -1; - return rid; + return NULLRID; } if(*page_type_ptr(*pp) == UNINITIALIZED_PAGE) { @@ -302,8 +308,8 @@ static void __really_do_ralloc(Page * page, recordid rid) { } -recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) { - +compensated_function recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) { + writelock(page->rwlatch, 376); if(*page_type_ptr(page) != SLOTTED_PAGE) { @@ -347,26 +353,27 @@ recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) { (*slot_length_ptr(page, rid.slot) >= PAGE_SIZE)); } - - pageWriteLSN(xid, page, lsn); - - writeunlock(page->rwlatch); + begin_action_ret(writeunlock, page->rwlatch, NULLRID) { // lock acquired above. 
+ pageWriteLSN(xid, page, lsn); + } compensate_ret(NULLRID); return rid; } -void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid) { +compensated_function void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid) { - readlock(page->rwlatch, 443); + begin_action(unlock, page->rwlatch) { + readlock(page->rwlatch, 443); + + *slot_ptr(page, rid.slot) = INVALID_SLOT; + *slot_length_ptr(page, rid.slot) = *freelist_ptr(page); + *freelist_ptr(page) = rid.slot; + /* *slot_length_ptr(page, rid.slot) = 0; */ - *slot_ptr(page, rid.slot) = INVALID_SLOT; - *slot_length_ptr(page, rid.slot) = *freelist_ptr(page); - *freelist_ptr(page) = rid.slot; - /* *slot_length_ptr(page, rid.slot) = 0; */ + pageWriteLSN(xid, page, lsn); - pageWriteLSN(xid, page, lsn); + } compensate; - unlock(page->rwlatch); } void slottedReadUnlocked(int xid, Page * page, recordid rid, byte *buff) { @@ -427,7 +434,7 @@ void slottedWrite(int xid, Page * page, lsn_t lsn, recordid rid, const byte *dat /*page->LSN = lsn; *lsn_ptr(page) = lsn * / - pageWriteLSN(page); */ + pageWriteLSN-page); */ unlock(page->rwlatch); } diff --git a/src/lladd/page/slotted.h b/src/lladd/page/slotted.h index 26ab206..21b4bb2 100644 --- a/src/lladd/page/slotted.h +++ b/src/lladd/page/slotted.h @@ -91,12 +91,12 @@ void slottedPageInitialize(Page * p); * @see postRallocSlot the implementation of the second phase. * */ -recordid slottedPreRalloc(int xid, long size, Page**p); +compensated_function recordid slottedPreRalloc(int xid, long size, Page**p); /** Identical to slottedPreRalloc, but allows the user to specify which page the record should be allocated in. */ -recordid slottedPreRallocFromPage(int xid, long page, long size, Page**p); +compensated_function recordid slottedPreRallocFromPage(int xid, long page, long size, Page**p); /** * The second phase of slot allocation. Called after the log entry @@ -113,13 +113,13 @@ recordid slottedPreRallocFromPage(int xid, long page, long size, Page**p); * page for this record (though it is possible that we need to compact * the page) */ -recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid); +compensated_function recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid); /** * Mark the space used by a record for reclaimation. * * @param rid the recordid to be freed. */ -void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid); +compensated_function void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid); void slottedPageInit(); void slottedPageDeinit(); diff --git a/src/lladd/transactional2.c b/src/lladd/transactional2.c index 0836c86..35706f9 100644 --- a/src/lladd/transactional2.c +++ b/src/lladd/transactional2.c @@ -20,6 +20,8 @@ TransactionLog XactionTable[MAX_TRANSACTIONS]; int numActiveXactions = 0; int xidCount = 0; +const recordid ROOT_RECORD = {1, 0, -1}; +const recordid NULLRID = {0,0,-1}; /** Locking for transactional2.c works as follows: diff --git a/test/cht/client.c b/test/cht/client.c index 5c45acd..846dbae 100644 --- a/test/cht/client.c +++ b/test/cht/client.c @@ -13,7 +13,8 @@ int main(int argc, char ** argv) { Tinit(); DfaSet * cht_client = cHtClientInit(conf); - pthread_t main_worker_loop = spawn_main_thread(cht_client); + // pthread_t main_worker_loop = + spawn_main_thread(cht_client); // cht_evals go here... 
diff --git a/test/lladd/check_lockManager.c b/test/lladd/check_lockManager.c index a029aa9..d2ea4cc 100644 --- a/test/lladd/check_lockManager.c +++ b/test/lladd/check_lockManager.c @@ -30,22 +30,37 @@ void * pageWorkerThread(void * j) { if(rw) { // readlock - if(LLADD_DEADLOCK == globalLockManager.readLockPage(xid, m)) { - k = 0; - globalLockManager.abort(xid); - deadlocks++; - printf("-"); - } - - + int locked = 0; + // begin_action_ret(NULL,NULL, 0) { + if(LLADD_DEADLOCK == globalLockManager.readLockPage(xid, m)) { + k = 0; + globalLockManager.abort(xid); + deadlocks++; + printf("-"); + } + // } end_action_ret(0); + /* if(locked) { + assert(compensation_error() == LLADD_DEADLOCK); + compensation_clear_error(); + } */ } else { // writelock + int locked = 0; + // begin_action_ret(NULL, NULL, 0) { + if(LLADD_DEADLOCK == globalLockManager.writeLockPage(xid, m)) { k = 0; globalLockManager.abort(xid); deadlocks++; printf("-"); + locked = 1; } + /* if(locked) { + int err = compensation_error(); + assert(err == LLADD_DEADLOCK); + compensation_clear_error(); + } */ + // } end_action_ret(0); } } @@ -73,23 +88,27 @@ void * ridWorkerThread(void * j) { if(rw) { // readlock - if(LLADD_DEADLOCK == globalLockManager.readLockRecord(xid, rid)) { - k = 0; - globalLockManager.abort(xid); - deadlocks++; - printf("-"); - } - + + // begin_action_ret(NULL, NULL, 0) { + if(LLADD_DEADLOCK == globalLockManager.readLockRecord(xid, rid)) { + k = 0; + globalLockManager.abort(xid); + deadlocks++; + printf("-"); + } + // } end_action_ret(0); } else { // writelock - if(LLADD_DEADLOCK == globalLockManager.writeLockRecord(xid, rid)) { - k = 0; - globalLockManager.abort(xid); - deadlocks++; - printf("-"); - } + // begin_action_ret(NULL, NULL, 0) { + if(LLADD_DEADLOCK == globalLockManager.writeLockRecord(xid, rid)) { + k = 0; + globalLockManager.abort(xid); + deadlocks++; + printf("-"); + } + // } end_action_ret(0); } } diff --git a/utilities/check_compensations b/utilities/check_compensations index 0019807..98b3750 100755 --- a/utilities/check_compensations +++ b/utilities/check_compensations @@ -49,11 +49,11 @@ foreach my $i (@source) { while(my $line = ) { $num++; my $in = 0; - if ($line =~ /\bbegin_action(_ret)?\b/) { + if ($line =~ /\bbegin_action(_ret)?\s*\(\b/ || $line =~ /\btry(_ret)?\s*[\(\{]/) { $nest++; $in = 1; } - if ($line =~ /\bend_action(_ret)?\b/ || $line =~ /\bcompensate(_ret)?\b/) { + if ($line =~ /}\s*end(_action)?(_ret)?\b/ || $line =~ /}\s*compensate(_ret)?\b/) { $nest--; if($in) { warn "$pwd/$i:$num Cannot handle single line compensation checks\n";
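
Note on the new compensation macros in lladd/compensations.h: try/end and try_ret(x)/end_ret(x) check compensation_error() both before entering the wrapped block and after it finishes, and return from the enclosing function (with the value x in the _ret forms) if an error is pending. They therefore only make sense at the top level of a compensated_function: the void forms in void functions, the _ret forms elsewhere. The sketch below shows both shapes; it mirrors the TsetRange() change in src/lladd/operations/set.c and loadPage() itself, but doTwoPageThing() and pinForRead() are made-up names, and the include paths are assumed from the tree layout shown in this diff.

    #include <lladd/common.h>
    #include <lladd/compensations.h>
    #include <lladd/transactional.h>
    #include <lladd/bufferManager.h>

    /* Void form, as in TsetRange(): bail out early and let the caller notice the error. */
    compensated_function void doTwoPageThing(int xid, recordid a) {
      Page * p;
      try {                        /* returns if an error is already pending        */
        p = loadPage(xid, a.page);
      } end;                       /* returns again if loadPage() flagged an error  */
      /* ... operate on p ... */
      releasePage(p);
    }

    /* Value-returning form, as in loadPage() and slottedPreRalloc(). */
    compensated_function Page * pinForRead(int xid, recordid a) {
      Page * p = NULL;
      try_ret(NULL) {
        p = loadPage(xid, a.page);
      } end_ret(NULL);
      return p;
    }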
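
Note on the caller side of the loadPage() change in src/lladd/bufferManager.c: loadPage() now returns NULL when a compensation error is pending, for example when the lock manager reports a deadlock, so callers that are not themselves wrapped in try or begin_action need an explicit check. The sketch assumes the same headers as above; readMyRecord() and its abort-and-report policy are illustrative, not something this patch prescribes.

    int readMyRecord(int xid, recordid rid, byte * buf) {
      Page * p = loadPage(xid, rid.page);
      if(p == NULL) {
        int err = compensation_error();   /* e.g. LLADD_DEADLOCK              */
        compensation_clear_error();
        Tabort(xid);                      /* give up this transaction's locks */
        return err;
      }
      /* ... copy the record at rid out of p into buf ... */
      releasePage(p);
      return 0;
    }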
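
Note on the begin_action()/compensate conversion in src/lladd/page/slotted.c: the idiom registers the cleanup (here, releasing the page's rwlatch) before doing the work. Judging from the definitions in this patch, end_action fires the cleanup only when compensation_error() is set (pthread_cleanup_pop(_func_ && compensation_error())), while compensate fires it unconditionally, which is what preserves the unconditional unlock() that slottedDeRalloc() used to perform. A sketch of the shape; updatePageLSN() is a made-up wrapper, and it assumes the internal page headers (src/lladd/page.h and the latch routines) rather than the public ones.

    compensated_function void updatePageLSN(int xid, Page * p, lsn_t lsn) {
      begin_action(unlock, p->rwlatch) {   /* register the cleanup first             */
        writelock(p->rwlatch, __LINE__);   /* second argument is a debugging tag,    */
                                           /* like the literals used in slotted.c    */
        pageWriteLSN(xid, p, lsn);         /* may set a compensation error           */
      } compensate;                        /* releases the latch on either path      */
    }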
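
Note on the shared recordid constants: src/lladd/transactional2.c now defines ROOT_RECORD = {1, 0, -1} and NULLRID = {0, 0, -1}, both exported through lladd/transactional.h, and the hand-rolled local NULLRID variables in linearHash.c, naiveLinearHash.c, nestedTopActions.c and pageOrientedListNTA.c are gone. Functions converted by this patch (dereferenceRID(), slottedPreRalloc(), slottedPreRallocFromPage(), slottedPostRalloc()) hand back NULLRID when they bail out, so callers can test for it as below. The field-by-field comparison is just one way to do it, and indirectRid is assumed to name an existing indirect-page record.

    recordid rid = dereferenceRID(xid, indirectRid);
    if(rid.page == NULLRID.page && rid.slot == NULLRID.slot && rid.size == NULLRID.size) {
      /* dereferenceRID() gave up; compensation_error() holds the reason */
      compensation_clear_error();
      /* recover or abort here */
    }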
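
Note on the constants moved into lladd/constants.h: the page-type defines (UNINITIALIZED_PAGE through ARRAY_LIST_PAGE) and record-type defines (UNINITIALIZED_RECORD through FIXED_RECORD) used to live in src/lladd/page.h, so only the page code could name them; putting them in the public constants header lets the operations and tests share them. The dispatch below is only a sketch of that kind of use and still belongs inside the page code, since page_type_ptr() remains private to src/lladd/page.h; the surrounding context and the abort() policy are illustrative.

    switch(*page_type_ptr(p)) {                       /* p: a pinned Page*          */
      case UNINITIALIZED_PAGE: slottedPageInitialize(p); break;
      case SLOTTED_PAGE:       /* ordinary slotted records            */ break;
      case INDIRECT_PAGE:      /* page tree; see dereferenceRID()     */ break;
      case FIXED_PAGE:
      case ARRAY_LIST_PAGE:    /* fixed-length record formats         */ break;
      default: abort();        /* header/free pages unexpected here   */
    }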
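
Note on ThashDone() in linearHashNTA: the new call frees the iterator's nested iterators (it->it, it->pit) and the iterator itself, which matters when a scan stops before ThashNext() returns 0. Whether ThashNext() cleans up on its own when it reaches the end of the hash is not visible in this diff, so the sketch only covers early termination; hash, target, and the ownership of the returned key/value buffers are outside the scope of this patch and assumed here.

    byte * key;   byte * value;
    int keySize, valueSize;
    lladd_hash_iterator * it = ThashIterator(xid, hash, sizeof(int), sizeof(int));
    while(ThashNext(xid, it, &key, &keySize, &value, &valueSize)) {
      if(*(int*)key == target) {     /* illustrative predicate          */
        ThashDone(xid, it);          /* frees it->it, it->pit, and it   */
        break;
      }
    }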
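
Note on the PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP workaround in src/lladd/operations/linkedListNTA.c: the fallback hard-codes a glibc-internal mutex layout ({0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}), which is why the @todo calls it a hack. A more portable route, sketched below and not what the patch does, is to build the recursive mutex at run time; where to call the init function is left open (once from Tinit(), or guarded by pthread_once()).

    #include <pthread.h>

    static pthread_mutex_t linked_list_mutex;

    static void linked_list_mutex_init(void) {
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
      pthread_mutex_init(&linked_list_mutex, &attr);
      pthread_mutexattr_destroy(&attr);
    }
    /* call once, e.g. from Tinit() or guarded by pthread_once() */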