More error handling...
commit 41fb85eef0 (parent 84a20a3c96)
22 changed files with 580 additions and 441 deletions
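The recurring move in this commit is mechanical: a function that can fail mid-transaction gains the compensated_function annotation, and each call it makes into another compensated function is wrapped in one of the checking macros (try_ret/end_ret, begin_action_ret/end_action_ret, or begin_action/compensate). A minimal sketch of the resulting calling convention, assuming the macros behave the way the converted code below implies — exampleExtend() is hypothetical, while the macros, compensation_error(), NULLRID, loadPage(), releasePage() and TpageAllocMany() all appear in the hunks that follow:

    /* Sketch only; not part of the commit. */
    compensated_function int exampleExtend(int xid, recordid rid) {
      Page * p;
      try_ret(compensation_error()) {      /* on failure, return the pending error */
        p = loadPage(xid, rid.page);
      } end_ret(compensation_error());

      int firstPage;
      begin_action_ret(releasePage, p, compensation_error()) {
        /* if this call fails, releasePage(p) runs before we return */
        firstPage = TpageAllocMany(xid, 4);
      } end_action_ret(compensation_error());

      releasePage(p);                      /* success path still releases the page */
      return firstPage;
    }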
@@ -53,7 +53,7 @@ terms specified in this license.
 #ifndef __ARRAY_LIST_H
 #define __ARRAY_LIST_H
 
-recordid TarrayListAlloc(int xid, int count, int multiplier, int size);
+compensated_function recordid TarrayListAlloc(int xid, int count, int multiplier, int size);
 
 Operation getArrayListAlloc();
 Operation getInitFixed();
@@ -68,6 +68,6 @@ Operation getUnInitPage();
 
 recordid dereferenceArrayListRid(Page * p, int offset);
 #define dereferenceArrayListRidUnlocked(x, y) dereferenceArrayListRid((x),(y))
-int TarrayListExtend(int xid, recordid rid, int slots);
-int TarrayListInstantExtend(int xid, recordid rid, int slots);
+compensated_function int TarrayListExtend(int xid, recordid rid, int slots);
+compensated_function int TarrayListInstantExtend(int xid, recordid rid, int slots);
 #endif

@@ -41,17 +41,17 @@ typedef struct {
   lladd_pagedList_iterator * pit;
 } lladd_hash_iterator;
 
-recordid ThashCreate(int xid, int keySize, int valSize);
-void ThashDelete(int xid, recordid hash);
+compensated_function recordid ThashCreate(int xid, int keySize, int valSize);
+compensated_function void ThashDelete(int xid, recordid hash);
 /* @return 1 if the key was defined, 0 otherwise. */
-int ThashInsert(int xid, recordid hash, const byte* key, int keySize, const byte* value, int valueSize);
+compensated_function int ThashInsert(int xid, recordid hash, const byte* key, int keySize, const byte* value, int valueSize);
 /* @return 1 if the key was defined, 0 otherwise. */
-int ThashRemove(int xid, recordid hash, const byte* key, int keySize);
+compensated_function int ThashRemove(int xid, recordid hash, const byte* key, int keySize);
 
 /** @return size of the value associated with key, or -1 if key not found.
     (a return value of zero means the key is associated with an
     empty value.) */
-int ThashLookup(int xid, recordid hash, const byte* key, int keySize, byte ** value);
+compensated_function int ThashLookup(int xid, recordid hash, const byte* key, int keySize, byte ** value);
 /**
    Allocate a new hash iterator. This API is designed to eventually be
    overloaded, and is subject to change. If the iterator is run to completion,
@@ -96,3 +96,4 @@ Operation getLinearHashRemove();
 #define HASH_FILL_FACTOR 0.7
 
 #endif // __LINEAR_HASH_NTA_H
+

@@ -65,11 +65,11 @@ terms specified in this license.
    multiple concurrent transactions. */
 /*#define REUSE_PAGES */
 
-int TpageAlloc(int xid/*, int type*/);
-int TpageAllocMany(int xid, int count/*, int type*/);
-int TpageDealloc(int xid, int pageid);
-int TpageSet(int xid, int pageid, byte* dat);
-int TpageGet(int xid, int pageid, byte* buf);
+compensated_function int TpageAlloc(int xid/*, int type*/);
+compensated_function int TpageAllocMany(int xid, int count/*, int type*/);
+compensated_function int TpageDealloc(int xid, int pageid);
+compensated_function int TpageSet(int xid, int pageid, byte* dat);
+compensated_function int TpageGet(int xid, int pageid, byte* buf);
 /*Operation getPageAlloc();
 Operation getPageDealloc(); */
 Operation getPageSet();
@@ -81,5 +81,5 @@ Operation getUpdateFreelistInverse();
 Operation getFreePageOperation();
 Operation getAllocFreedPage();
 Operation getUnallocFreedPage();
-void pageOperationsInit();
+compensated_function void pageOperationsInit();
 #endif

@@ -89,10 +89,10 @@ typedef struct {
 
 //recordid dereferencePagedListRID(int xid, recordid rid);
 /** @return 1 if the key was already in the list. */
-int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize);
-int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value);
-int TpagedListRemove(int xid, recordid list, const byte * key, int keySize);
-int TpagedListMove(int xid, recordid start_list, recordid end_list, const byte *key, int keySize);
+compensated_function int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize);
+compensated_function int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value);
+compensated_function int TpagedListRemove(int xid, recordid list, const byte * key, int keySize);
+compensated_function int TpagedListMove(int xid, recordid start_list, recordid end_list, const byte *key, int keySize);
 /** The linked list iterator can tolerate the concurrent removal of values that
     it has already returned.  In the presence of such removals, the iterator
     will return the keys and values present in the list as it existed when next()

@@ -322,14 +322,14 @@ int Tbegin();
  *
  * @see operations.h set.h
  */
-void Tupdate(int xid, recordid rid, const void *dat, int op);
+compensated_function void Tupdate(int xid, recordid rid, const void *dat, int op);
 
 /**
  * @param xid transaction ID
 * @param rid reference to page/slot
 * @param dat buffer into which data goes
 */
-void Tread(int xid, recordid rid, void *dat);
+compensated_function void Tread(int xid, recordid rid, void *dat);
 void TreadUnlocked(int xid, recordid rid, void *dat);
 
 /**

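Tupdate() and Tread() now carry the annotation too, so callers that cannot return an error code can bracket the call and poll compensation_error() afterwards. A hedged sketch of that shape — readAndCheck() is hypothetical; try/end and compensation_error() come from the hunks in this commit:

    /* Sketch only; not part of the commit. */
    void readAndCheck(int xid, recordid rid, void * buf) {
      try {                           /* plain try/end: this caller returns void */
        Tread(xid, rid, buf);
      } end;
      assert(!compensation_error());  /* sketch: fail fast if the read compensated */
    }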
@@ -35,10 +35,12 @@ static int getBlockContainingOffset(TarrayListParameters tlp, int offset, int *
 
 /*----------------------------------------------------------------------------*/
 
-recordid TarrayListAlloc(int xid, int count, int multiplier, int size) {
-
-  int firstPage = TpageAllocMany(xid, count+1);
+compensated_function recordid TarrayListAlloc(int xid, int count, int multiplier, int size) {
+
+  int firstPage;
+  try_ret(NULLRID) {
+    firstPage = TpageAllocMany(xid, count+1);
+  } end_ret(NULLRID);
   TarrayListParameters tlp;
 
   tlp.firstPage = firstPage;
@@ -52,8 +54,9 @@ recordid TarrayListAlloc(int xid, int count, int multiplier, int size) {
   rid.page = firstPage;
   rid.size = size;
   rid.slot = 0;
-
-  Tupdate(xid, rid, &tlp, OPERATION_ARRAY_LIST_ALLOC);
+  try_ret(NULLRID) {
+    Tupdate(xid, rid, &tlp, OPERATION_ARRAY_LIST_ALLOC);
+  } end_ret(NULLRID);
 
   return rid;
 }
@@ -117,8 +120,11 @@ Operation getArrayListAlloc() {
 }
 /*----------------------------------------------------------------------------*/
 
-int TarrayListExtend(int xid, recordid rid, int slots) {
-  Page * p = loadPage(xid, rid.page);
+compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
+  Page * p;
+  try_ret(compensation_error()) {
+    p = loadPage(xid, rid.page);
+  } end_ret(compensation_error());
   TarrayListParameters tlp = pageToTLP(p);
 
   int lastCurrentBlock;
@@ -142,18 +148,21 @@ int TarrayListExtend(int xid, recordid rid, int slots) {
   for(int i = lastCurrentBlock+1; i <= lastNewBlock; i++) {
     /* Alloc block i */
     int blockSize = tlp.initialSize * powl(tlp.multiplier, i);
-    int newFirstPage = TpageAllocMany(xid, blockSize);
+    int newFirstPage;
+    begin_action_ret(releasePage, p, compensation_error()) {
+      newFirstPage = TpageAllocMany(xid, blockSize);
+    } end_action_ret(compensation_error());
     DEBUG("block %d\n", i);
     /* Iterate over the storage blocks that are pointed to by our current indirection block. */
-    for(int j = 0; j < blockSize; j++) {
-      DEBUG("page %d (%d)\n", j, j + newFirstPage);
-      tmp2.page = j + newFirstPage;
+    // for(int j = 0; j < blockSize; j++) {
+    //  DEBUG("page %d (%d)\n", j, j + newFirstPage);
+    //  tmp2.page = j + newFirstPage;
       /** @todo If we were a little smarter about this, and fixed.c
          could handle uninitialized blocks correctly, then we
         wouldn't have to iterate over the datapages in
-        TarrayListExtend() */
-      // Tupdate(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
-    }
+        TarrayListExtend() (Fixed?)*/
+    // T update(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
+    // }
 
     tmp.slot = i + FIRST_DATA_PAGE_OFFSET;
     /** @todo what does this do to recovery?? */
@@ -162,8 +171,9 @@ int TarrayListExtend(int xid, recordid rid, int slots) {
        Tset(xid, tmp, &newFirstPage);
     *page_type_ptr(p) = ARRAY_LIST_PAGE; */
     /* @todo This is a copy of Tupdate!! Replace it.*/
-
+    begin_action_ret(releasePage, p, compensation_error()) {
       alTupdate(xid, tmp, &newFirstPage, OPERATION_SET);
+    } end_action_ret(compensation_error());
 
     DEBUG("Tset: {%d, %d, %d} = %d\n", tmp.page, tmp.slot, tmp.size, newFirstPage);
   }
@@ -171,16 +181,20 @@ int TarrayListExtend(int xid, recordid rid, int slots) {
   tmp.slot = MAX_OFFSET_POSITION;
 
   int newMaxOffset = tlp.maxOffset+slots;
-  *page_type_ptr(p) = FIXED_PAGE;
-  Tset(xid, tmp, &newMaxOffset);
-  *page_type_ptr(p) = ARRAY_LIST_PAGE;
-  releasePage(p);
+  // *page_type_ptr(p) = FIXED_PAGE;
+  // Tset(xid, tmp, &newMaxOffset);
+  // *page_type_ptr(p) = ARRAY_LIST_PAGE;
+  // releasePage(p);
+  /* @todo This is a copy of Tupdate!! Replace it.*/
+  begin_action_ret(releasePage, p, compensation_error()) {
+    alTupdate(xid, tmp, &newMaxOffset, OPERATION_SET);
+  } compensate_ret(compensation_error());
 
   return 0;
 
 }
 /** @todo: TarrayListInstantExtend, is a hacked-up cut and paste version of TarrayListExtend */
-int TarrayListInstantExtend(int xid, recordid rid, int slots) {
+compensated_function int TarrayListInstantExtend(int xid, recordid rid, int slots) {
   Page * p = loadPage(xid, rid.page);
   TarrayListParameters tlp = pageToTLP(p);
 

@@ -31,34 +31,45 @@ typedef struct {
 
 
 /* private methods... */
-static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * lhh);
+compensated_function static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * lhh);
 
 
 #define HASH_INIT_ARRAY_LIST_COUNT twoToThe(HASH_INIT_BITS)
 #define HASH_INIT_ARRAY_LIST_MULT 2
 
-recordid ThashCreate(int xid, int keySize, int valueSize) {
-  recordid hashHeader = Talloc(xid, sizeof(lladd_hash_header));
+compensated_function recordid ThashCreate(int xid, int keySize, int valueSize) {
+  recordid hashHeader;
   lladd_hash_header lhh;
 
+  try_ret(NULLRID) {
+    hashHeader = Talloc(xid, sizeof(lladd_hash_header));
   if(keySize == VARIABLE_LENGTH || valueSize == VARIABLE_LENGTH) {
     lhh.buckets = TarrayListAlloc(xid, HASH_INIT_ARRAY_LIST_COUNT, HASH_INIT_ARRAY_LIST_MULT, sizeof(pagedListHeader));
   } else {
     lhh.buckets = TarrayListAlloc(xid, HASH_INIT_ARRAY_LIST_COUNT, HASH_INIT_ARRAY_LIST_MULT, sizeof(lladd_linkedList_entry) + keySize + valueSize);
   }
+  } end_ret(NULLRID);
+  try_ret(NULLRID) {
   TarrayListExtend(xid, lhh.buckets, HASH_INIT_ARRAY_LIST_COUNT);
+  } end_ret(NULLRID);
   int i;
   recordid bucket = lhh.buckets;
   if(keySize == VARIABLE_LENGTH || valueSize == VARIABLE_LENGTH) {
     for(i = 0; i < HASH_INIT_ARRAY_LIST_COUNT; i++) {
+      try_ret(NULLRID) {
       recordid rid = TpagedListAlloc(xid);
      bucket.slot = i;
      Tset(xid, bucket, &rid);
+      } end_ret(NULLRID);
 
     }
   } else {
     byte * entry = calloc(1, lhh.buckets.size);
     for(i = 0; i < HASH_INIT_ARRAY_LIST_COUNT; i++) {
       bucket.slot = i;
+      begin_action_ret(free, entry, NULLRID) {
       Tset(xid, bucket, entry);
+      } end_action_ret(NULLRID);
     }
     free (entry);
   }
@@ -67,17 +78,18 @@ recordid ThashCreate(int xid, int keySize, int valueSize) {
   lhh.nextSplit = 0;
   lhh.bits = HASH_INIT_BITS;
   lhh.numEntries = 0;
-
+  try_ret(NULLRID) {
   Tset(xid, hashHeader, &lhh);
+  } end_ret(NULLRID);
   return hashHeader;
 }
 
-void ThashDelete(int xid, recordid hash) {
+compensated_function void ThashDelete(int xid, recordid hash) {
   abort();
 }
 
-static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, const byte* value, int valueSize);
-static int __ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize);
+compensated_function static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, const byte* value, int valueSize);
+compensated_function static int __ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize);
 
 typedef struct {
   recordid hashHeader;
@@ -90,7 +102,7 @@ typedef struct {
   int valueSize;
 } linearHash_remove_arg;
 
-static int operateInsert(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
+compensated_function static int operateInsert(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
   const linearHash_remove_arg * args = dat;
   recordid hashHeader = args->hashHeader;
   int keySize = args->keySize;
@@ -98,21 +110,23 @@ static int operateInsert(int xid, Page *p, lsn_t lsn, recordid rid, const void
 
   byte * key = (byte*)(args+1);
   byte * value = ((byte*)(args+1))+ keySize;
+  begin_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
     pthread_mutex_lock(&linear_hash_mutex);
     __ThashInsert(xid, hashHeader, key, keySize, value, valueSize);
-  pthread_mutex_unlock(&linear_hash_mutex);
+  } compensate_ret(compensation_error());
   return 0;
 }
-static int operateRemove(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
+compensated_function static int operateRemove(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
   const linearHash_insert_arg * args = dat;
   recordid hashHeader = args->hashHeader;
   int keySize = args->keySize;
 
   byte * key = (byte*)(args + 1);
 
+  begin_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
     pthread_mutex_lock(&linear_hash_mutex);
     __ThashRemove(xid, hashHeader, key, keySize);
-  pthread_mutex_unlock(&linear_hash_mutex);
+  } compensate_ret(compensation_error());
 
   return 0;
 }
 Operation getLinearHashInsert() {
@@ -134,26 +148,36 @@ Operation getLinearHashRemove() {
   return o;
 }
 
-int ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, const byte* value, int valueSize) {
+compensated_function int ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, const byte* value, int valueSize) {
   pthread_mutex_lock(&linear_hash_mutex);
   int argSize = sizeof(linearHash_insert_arg)+keySize;
   linearHash_insert_arg * arg = malloc(argSize);
   arg->hashHeader = hashHeader;
   arg->keySize = keySize;
   memcpy(arg+1, key, keySize);
-  void * handle = TbeginNestedTopAction(xid, OPERATION_LINEAR_HASH_INSERT, (byte*)arg, argSize);
 
+  int ret;
 
+  /** @todo MEMORY LEAK arg, handle on pthread_cancel.. */
+  void * handle;
+  begin_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
+    handle = TbeginNestedTopAction(xid, OPERATION_LINEAR_HASH_INSERT, (byte*)arg, argSize);
     free(arg);
-  int ret = __ThashInsert(xid, hashHeader, key, keySize, value, valueSize);
+    ret = __ThashInsert(xid, hashHeader, key, keySize, value, valueSize);
+  } end_action_ret(compensation_error());
+  // beg in_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
     TendNestedTopAction(xid, handle);
+  // } comp ensate_ret(compensation_error());
   pthread_mutex_unlock(&linear_hash_mutex);
   return ret;
 }
-static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, const byte* value, int valueSize) {
+compensated_function static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, const byte* value, int valueSize) {
   lladd_hash_header lhh;
 
+  try_ret(compensation_error()) {
     Tread(xid, hashHeader, &lhh);
 
+  } end_ret(compensation_error());
   lhh.numEntries ++;
+  try_ret(compensation_error()) {
     if(lhh.keySize == VARIABLE_LENGTH || lhh.valueSize == VARIABLE_LENGTH) {
       if(lhh.numEntries > (int)((double)(lhh.nextSplit + twoToThe(lhh.bits-1)) * (HASH_FILL_FACTOR))) {
 	ThashSplitBucket(xid, hashHeader, &lhh);
@@ -163,22 +187,28 @@ static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keyS
 	ThashSplitBucket(xid, hashHeader, &lhh);
       }
     }
+  } end_ret(compensation_error());
 
   recordid bucket = lhh.buckets;
   bucket.slot = hash(key, keySize, lhh.bits, lhh.nextSplit);
 
   int ret;
+  try_ret(compensation_error()) {
 
     if(lhh.keySize == VARIABLE_LENGTH || lhh.valueSize == VARIABLE_LENGTH) {
 
       recordid bucketList;
 
       Tread(xid, bucket, &bucketList);
 
       //    int before = TpagedListSpansPages(xid, bucketList);
 
       ret = TpagedListInsert(xid, bucketList, key, keySize, value, valueSize);
 
       //    int after = TpagedListSpansPages(xid, bucketList);
       //    if(before != after) {  // Page overflowed...
-      //    ThashSplitBucket(xid, hashHeader, &lhh);
-      //    ThashSplitBucket(xid, hashHeader, &lhh);
+      //    T hashSplitBucket(xid, hashHeader, &lhh);
+      //    T hashSplitBucket(xid, hashHeader, &lhh);
       //    }
 
     } else {
@@ -188,17 +218,27 @@ static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keyS
     if(ret) { lhh.numEntries--; }
     Tset(xid, hashHeader, &lhh);
 
+  } end_ret(compensation_error());
+
   return ret;
 }
-int ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize) {
+compensated_function int ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize) {
 
-  pthread_mutex_lock(&linear_hash_mutex);
   byte * value;
-  int valueSize = ThashLookup(xid, hashHeader, key, keySize, &value);
+  int valueSize;
+  int ret;
+  begin_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
+    pthread_mutex_lock(&linear_hash_mutex);
+    valueSize = ThashLookup(xid, hashHeader, key, keySize, &value);
+  } end_action_ret(compensation_error());
 
   if(valueSize == -1) {
     pthread_mutex_unlock(&linear_hash_mutex);
     return 0;
   }
 
+  begin_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
 
   int argSize = sizeof(linearHash_remove_arg) + keySize + valueSize;
   linearHash_remove_arg * arg = malloc(argSize);
   arg->hashHeader = hashHeader;
@@ -206,51 +246,59 @@ int ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize) {
   arg->valueSize = valueSize;
   memcpy(arg+1, key, keySize);
   memcpy((byte*)(arg+1)+keySize, value, valueSize);
+  void * handle;
 
-  void * handle = TbeginNestedTopAction(xid, OPERATION_LINEAR_HASH_REMOVE, (byte*)arg, argSize);
+  handle = TbeginNestedTopAction(xid, OPERATION_LINEAR_HASH_REMOVE, (byte*)arg, argSize);
   free(arg);
   free(value);
-  int ret = __ThashRemove(xid, hashHeader, key, keySize);
 
+  ret = __ThashRemove(xid, hashHeader, key, keySize);
   TendNestedTopAction(xid, handle);
   pthread_mutex_unlock(&linear_hash_mutex);
 
+  } compensate_ret(compensation_error());
 
   return ret;
 }
 
-static int __ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize) {
+compensated_function static int __ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize) {
+  int ret;
 
+  try_ret(compensation_error()) {
     lladd_hash_header lhh;
     Tread(xid, hashHeader, &lhh);
 
     lhh.numEntries--;
 
     Tset(xid, hashHeader, &lhh);
 
 
     recordid bucket = lhh.buckets;
     bucket.slot = hash(key, keySize, lhh.bits, lhh.nextSplit);
 
-  int ret;
     if(lhh.keySize == VARIABLE_LENGTH || lhh.valueSize == VARIABLE_LENGTH) {
       recordid bucketList;
       Tread(xid, bucket, &bucketList);
       ret = TpagedListRemove(xid, bucketList, key, keySize);
     } else {
+      if(lhh.keySize != keySize) { compensation_set_error(LLADD_INTERNAL_ERROR); }
       assert(lhh.keySize == keySize);
       ret = TlinkedListRemove(xid, bucket, key, keySize);
     }
+  } end_ret(compensation_error());
 
   return ret;
 }
 
-int ThashLookup(int xid, recordid hashHeader, const byte * key, int keySize, byte ** value) {
+compensated_function int ThashLookup(int xid, recordid hashHeader, const byte * key, int keySize, byte ** value) {
   lladd_hash_header lhh;
+  int ret;
 
+  // This whole thing is safe since the callee's do not modify global state...
 
+  begin_action_ret(pthread_mutex_unlock, &linear_hash_mutex, compensation_error()) {
     pthread_mutex_lock(&linear_hash_mutex);
     Tread(xid, hashHeader, &lhh);
 
 
     recordid bucket = lhh.buckets;
     bucket.slot = hash(key, keySize, lhh.bits, lhh.nextSplit);
 
-  int ret;
     if(lhh.keySize == VARIABLE_LENGTH || lhh.valueSize == VARIABLE_LENGTH) {
       recordid bucketList;
       Tread(xid, bucket, &bucketList);
@@ -259,11 +307,14 @@ int ThashLookup(int xid, recordid hashHeader, const byte * key, int keySize, byt
     assert(lhh.keySize == keySize);
     ret = TlinkedListFind(xid, bucket, key, keySize, value);
   }
-  pthread_mutex_unlock(&linear_hash_mutex);
+  } compensate_ret(compensation_error());
 
   return ret;
 }
-static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * lhh) {
+compensated_function static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * lhh) {
   // if(1) { return; }
 
+  try {
     long old_bucket = lhh->nextSplit;
     long new_bucket = old_bucket + twoToThe(lhh->bits-1);
     recordid old_bucket_rid = lhh->buckets;
@@ -287,14 +338,14 @@ static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * l
       lhh->nextSplit = 0;
       lhh->bits++;
     }
 
     /** @todo linearHashNTA's split bucket should use the 'move' function call. */
     if(lhh->keySize == VARIABLE_LENGTH || lhh->valueSize == VARIABLE_LENGTH) {
       recordid old_bucket_list;
       // recordid new_bucket_list;
       Tread(xid, old_bucket_rid, &old_bucket_list);
 
       // Tread(xid, new_bucket_rid, &(new_bucket_list.page)); // @todo could remember value from above.
       lladd_pagedList_iterator * pit = TpagedListIterator(xid, old_bucket_list);
 
       byte *key, *value;
       int keySize, valueSize;
       while(TpagedListNext(xid, pit, &key, &keySize, &value, &valueSize)) {
@@ -320,11 +371,14 @@ static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * l
 	free(value);
       }
     }
-    // TendNestedTopAction(xid, handle);
+  } end;
 
+  // TendNestedTopAction(xid, handle);
+  return;
 }
 lladd_hash_iterator * ThashIterator(int xid, recordid hashHeader, int keySize, int valueSize) {
   lladd_hash_iterator * it = malloc(sizeof(lladd_hash_iterator));
+  begin_action_ret(free, it, NULL) {
   it->hashHeader = hashHeader;
   lladd_hash_header lhh;
   Tread(xid, hashHeader, &lhh);
@@ -340,13 +394,16 @@ lladd_hash_iterator * ThashIterator(int xid, recordid hashHeader, int keySize, i
     it->pit = NULL;
     it->it = TlinkedListIterator(xid, it->bucket, it->keySize, it->valueSize);
   }
+  } end_action_ret(NULL);
   return it;
 }
 
 int ThashNext(int xid, lladd_hash_iterator * it, byte ** key, int * keySize, byte** value, int * valueSize) {
+  try_ret(0) {
   if(it->it) {
     assert(!it->pit);
     while(!TlinkedListNext(xid, it->it, key, keySize, value, valueSize)) {
+      if(compensation_error()) { return 0; }
       it->bucket.slot++;
       if(it->bucket.slot < it->numBuckets) {
 	it->it = TlinkedListIterator(xid, it->bucket, it->keySize, it->valueSize);
@@ -358,6 +415,7 @@ int ThashNext(int xid, lladd_hash_iterator * it, byte ** key, int * keySize, byt
   } else {
     assert(it->pit);
     while(!TpagedListNext(xid, it->pit, key, keySize, value, valueSize)) {
+      if(compensation_error()) { return 0; }
       it->bucket.slot++;
       if(it->bucket.slot < it->numBuckets) {
 	recordid bucketList;
@@ -369,6 +427,7 @@ int ThashNext(int xid, lladd_hash_iterator * it, byte ** key, int * keySize, byt
       }
     }
   }
+  } end_ret(0);
   return 1;
 }
 

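ThashNext() above now returns 0 both at end-of-iteration and when a compensation error fires mid-scan, so callers should disambiguate after the loop. A sketch under that reading (scanAll() is hypothetical):

    /* Sketch only; not part of the commit. */
    void scanAll(int xid, lladd_hash_iterator * it) {
      byte *key, *value;
      int keySize, valueSize;
      while(ThashNext(xid, it, &key, &keySize, &value, &valueSize)) {
        /* ... consume key/value ... */
        free(key);
        free(value);
      }
      assert(!compensation_error());  /* 0 means "done" or "failed"; check which */
    }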
@@ -55,7 +55,7 @@ typedef struct {
   int valueSize;
 } lladd_linkedListRemove_log;
 
-static int operateInsert(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
+compensated_function static int operateInsert(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
   assert(!p);
   lladd_linkedListRemove_log * log = (lladd_linkedListRemove_log*)dat;
 
@@ -76,7 +76,7 @@ static int operateInsert(int xid, Page *p, lsn_t lsn, recordid rid, const void
 
   return 0;
 }
-static int operateRemove(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
+compensated_function static int operateRemove(int xid, Page *p, lsn_t lsn, recordid rid, const void *dat) {
   assert(!p);
   lladd_linkedListRemove_log * log = (lladd_linkedListRemove_log*)dat;
 
@@ -133,7 +133,11 @@ Operation getLinkedListRemove() {
 }
 static void __TlinkedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize) {
   //int ret = TlinkedListRemove(xid, list, key, keySize);
+  try {
 
     lladd_linkedList_entry * entry = malloc(sizeof(lladd_linkedList_entry) + keySize + valueSize);
 
     Tread(xid, list, entry);
     if(!entry->next.size) {
       memcpy(entry+1, key, keySize);
@@ -154,39 +158,48 @@ static void __TlinkedListInsert(int xid, recordid list, const byte * key, int ke
       free(newEntry);
     }
     free(entry);
     //return ret;
+  } end;
 }
 
 int TlinkedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value) {
 
   lladd_linkedList_entry * entry = malloc(list.size);
 
+  begin_action_ret(pthread_mutex_unlock, &linked_list_mutex, -2) {
     pthread_mutex_lock(&linked_list_mutex);
     Tread(xid, list, entry);
+  } end_action_ret(-2);
 
   if(!entry->next.size) {
     free(entry);
     pthread_mutex_unlock(&linked_list_mutex);
     return -1; // empty list
   }
-  while(1) {
 
+  int done = 0;
+  int ret = -1;
+  begin_action_ret(pthread_mutex_unlock, &linked_list_mutex, -2) {
+    while(!done) {
 
       if(!memcmp(entry + 1, key, keySize)) {
	// Bucket contains the entry of interest.
	int valueSize = list.size - (sizeof(lladd_linkedList_entry) + keySize);
	*value = malloc(valueSize);
	memcpy(*value, ((byte*)(entry+1))+keySize, valueSize);
-	free(entry);
-	pthread_mutex_unlock(&linked_list_mutex);
-	return valueSize;
+	done = 1;
+	ret = valueSize;
       }
       if(entry->next.size != -1) {
	assert(entry->next.size == list.size); // Don't handle lists with variable length records for now
	Tread(xid, entry->next, entry);
       } else {
-      break;
+	done = 1;
       }
     }
     free(entry);
-  pthread_mutex_unlock(&linked_list_mutex);
-  return -1;
+  } compensate_ret(-2);
 
+  return ret;
 }
 

@@ -112,19 +112,23 @@ int __alloc_freed(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
   return 0;
 }
 
-int TpageGet(int xid, int pageid, byte *memAddr) {
-  Page * q = loadPage(xid, pageid);
+compensated_function int TpageGet(int xid, int pageid, byte *memAddr) {
+  Page * q = 0;
+  try_ret(compensation_error()) {
+    q = loadPage(xid, pageid);
     memcpy(memAddr, q->memAddr, PAGE_SIZE);
     releasePage(q);
+  } end_ret(compensation_error());
   return 0;
 }
 
-int TpageSet(int xid, int pageid, byte * memAddr) {
+compensated_function int TpageSet(int xid, int pageid, byte * memAddr) {
   recordid rid;
   rid.page = pageid;
   rid.slot = 0;
   rid.size = 0;
+  try_ret(compensation_error()) {
     Tupdate(xid,rid,memAddr, OPERATION_PAGE_SET);
+  } end_ret(compensation_error());
   return 0;
 }
 
@@ -132,22 +136,22 @@ int TpageSet(int xid, int pageid, byte * memAddr) {
 /** This needs to be called immediately after the storefile is opened,
     since it needs to perform raw, synchronous I/O on the pagefile for
     bootstrapping purposes. */
-void pageOperationsInit() {
+compensated_function void pageOperationsInit() {
   /* Page p;
   p.rwlatch = initlock();
   p.loadlatch = initlock();
   assert(!posix_memalign((void **)&(p.memAddr), PAGE_SIZE, PAGE_SIZE));
   p.id = 0;*/
 
-  Page * p = loadPage(-1, 0);
+  Page * p;
+  try {
+    p = loadPage(-1, 0);
+    assert(!compensation_error());
+  } end;
   /** Release lock on page zero. */
 
   //  pageRead(&p);
 
   if(*page_type_ptr(p) != LLADD_HEADER_PAGE) {
     /*printf("Writing new LLADD header\n"); fflush(NULL); */
     headerPageInitialize(p);
     //    pageWrite(p);
   } else {
     /*printf("Found LLADD header.\n"); fflush(NULL);*/
   }
@@ -157,19 +161,13 @@ void pageOperationsInit() {
   freepage = *headerFreepage_ptr(p);
 
   assert(freepage);
 
-  /* free(p.memAddr); */
-
-  //deletelock(p.loadlatch);
-  //deletelock(p.rwlatch);
   releasePage(p);
 
   pthread_mutex_init(&pageAllocMutex, NULL);
 
 }
 
 
-/** @todo TpageAlloc / TpageDealloc + undo is not multi-transaction / threadsafe.
+/** @todo TpageAlloc / TpageDealloc + undo + page reuse is not multi-transaction / threadsafe.
 
     Example of the problem:
 
@@ -200,48 +198,39 @@ void pageOperationsInit() {
 
 */
 
-int TpageDealloc(int xid, int pageid) {
-  recordid rid;
+compensated_function int TpageDealloc(int xid, int pageid) {
 #ifdef REUSE_PAGES
 
-  update_tuple t;
+  begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, -1) {
+    recordid rid;
+
+    update_tuple t;
     pthread_mutex_lock(&pageAllocMutex);
-#endif
 
     rid.page = pageid;
     rid.slot = 0;
     rid.size = 0;
+#endif
 
 
 #ifdef REUSE_PAGES
     assert(freelist != pageid);
 
 
     t.before = freelist;
 
-  //#endif
+    //#endif
 
     Tupdate(xid, rid, &freelist, OPERATION_FREE_PAGE);
 
-  //#ifdef REUSE_PAGES
+    //#ifdef REUSE_PAGES
     t.after = pageid;
     freelist = pageid;
 
     rid.page = 0;
     Tupdate(xid, rid, &t, OPERATION_UPDATE_FREELIST);
 
 
 
     /* OLD STUFF: Page * p = loadPage(pageid); int type = *page_type_ptr(p); releasePage(p); Tupdate(xid, rid, &type, OPERATION_PAGE_DEALLOC); */
 
     pthread_mutex_unlock(&pageAllocMutex);
+  } end_action_ret(-1);
 #endif
 
   return 0;
 }
 
-int TpageAlloc(int xid /*, int type */) {
+compensated_function int TpageAlloc(int xid /*, int type */) {
   recordid rid;
   update_tuple t;
   rid.slot = 0;
@@ -257,11 +246,14 @@ int TpageAlloc(int xid /*, int type */) {
 
     newpage = freelist;
 
-    Page * p = loadPage(newpage);  /* Could obtain write lock here,
+    Page * p;
+    begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
+      p = loadPage(newpage);     /* Could obtain write lock here,
				     but this is the only function
				     that should ever touch pages of
				     type LLADD_FREE_PAGE, and we
				     already hold a mutex... */
+    } end_ret(compensation_error());
     assert(*page_type_ptr(p) == LLADD_FREE_PAGE);
     t.before = freelist;
     freelist = *nextfreepage_ptr(p);
@@ -269,12 +261,13 @@ int TpageAlloc(int xid /*, int type */) {
     assert(newpage != freelist);
     releasePage(p);
 
+    begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
       rid.page = newpage;
       Tupdate(xid, rid, &freelist, OPERATION_ALLOC_FREED);
 
       rid.page = 0;
       Tupdate(xid, rid, &t, OPERATION_UPDATE_FREELIST);
 
+    } end_ret;
     rid.page = newpage;
 
   } else {
@@ -291,7 +284,10 @@ int TpageAlloc(int xid /*, int type */) {
     /* Don't need to touch the new page. */
 
     rid.page = 0;
 
+    begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
       Tupdate(xid, rid, &t, OPERATION_UPDATE_FREESPACE);
+    } end_action_ret(compensation_error());
 
     rid.page = newpage;
 #ifdef REUSE_PAGES
@@ -303,7 +299,7 @@ int TpageAlloc(int xid /*, int type */) {
   return newpage;
 }
 
-int TpageAllocMany(int xid, int count /*, int type*/) {
+compensated_function int TpageAllocMany(int xid, int count /*, int type*/) {
   /* int firstPage = -1;
      int lastPage = -1; */
   recordid rid;
@@ -321,11 +317,11 @@ int TpageAllocMany(int xid, int count /*, int type*/) {
   /* Don't need to touch the new pages. */
 
   rid.page = 0;
+  begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
     Tupdate(xid, rid, &t, OPERATION_UPDATE_FREESPACE);
 
     rid.page = newpage;
+  } compensate_ret(compensation_error());
 
-  pthread_mutex_unlock(&pageAllocMutex);
   return newpage;
 }
 
@@ -336,9 +332,9 @@ int TpageAllocMany(int xid, int count /*, int type*/) {
      obtain mutex
      choose a free page using in-memory data
      load page to be used, and update in-memory data. (obtains lock on loaded page)
-     Tupdate() the page, zeroing it, and saving the old successor in the log.
+     T update() the page, zeroing it, and saving the old successor in the log.
      relase the page (avoid deadlock in next step)
-     Tupdate() LLADD's header page (the first in the store file) with a new copy of
+     T update() LLADD's header page (the first in the store file) with a new copy of
      the in-memory data, saving old version in the log.
     release mutex
 
@@ -346,9 +342,9 @@ int TpageAllocMany(int xid, int count /*, int type*/) {
 
     obtain mutex
     determine the current head of the freelist using in-memory data
-    Tupdate() the page, initializing it to be a freepage, and physically logging the old version
+    T update() the page, initializing it to be a freepage, and physically logging the old version
    release the page
-    Tupdate() LLADD's header page with a new copy of the in-memory data, saving old version in the log
+    T update() LLADD's header page with a new copy of the in-memory data, saving old version in the log
    release mutex
 
 */
 

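The allocator hunks above pair pageAllocMutex with the compensation macros so the mutex is dropped on every exit path; with compensate_ret the registered action also runs on the success path, which is why the explicit pthread_mutex_unlock() disappears from TpageAllocMany(). A sketch of that idiom (allocSketch() is hypothetical):

    /* Sketch only; not part of the commit. */
    compensated_function int allocSketch(int xid, recordid rid, int freespace) {
      pthread_mutex_lock(&pageAllocMutex);
      begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
        Tupdate(xid, rid, &freespace, OPERATION_UPDATE_FREESPACE);
      } compensate_ret(compensation_error());  /* unlock runs here, error or not */
      return 0;
    }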
@@ -20,7 +20,7 @@ recordid TpagedListAlloc(int xid) {
   return ret;
 }
 
-int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize) {
+compensated_function int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize) {
   pagedListHeader header;
   Tread(xid, list, &header);
   recordid headerRid = list;
@@ -73,7 +73,7 @@ int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, cons
   return ret;
 }
 
-int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value) {
+compensated_function int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value) {
   pagedListHeader header;
   Tread(xid, list, &header);
 
@@ -113,7 +113,7 @@ int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte *
   return -1;
 }
 
-int TpagedListRemove(int xid, recordid list, const byte * key, int keySize) {
+compensated_function int TpagedListRemove(int xid, recordid list, const byte * key, int keySize) {
   pagedListHeader header;
   Tread(xid, list, &header);
   recordid headerRid;
@@ -170,7 +170,7 @@ int TpagedListRemove(int xid, recordid list, const byte * key, int keySize) {
   return 0;
 }
 
-int TpagedListMove(int xid, recordid start_list, recordid end_list, const byte * key, int keySize) {
+compensated_function int TpagedListMove(int xid, recordid start_list, recordid end_list, const byte * key, int keySize) {
   byte * value;
   int valueSize = TpagedListFind(xid, start_list, key, keySize, &value);
   if(valueSize != -1) {

@@ -121,11 +121,13 @@ compensated_function void TsetRange(int xid, recordid rid, int offset, int lengt
 
   // Pass size of range into Tupdate via the recordid.
   rid.size = sizeof(set_range_t) + 2 * length;
-  Tupdate(xid, rid, range, OPERATION_SET_RANGE);
-
-  releasePage(p);
-  free(record);
+  /** @todo will leak 'range' if interrupted with pthread_cancel */
+  begin_action(releasePage, p) {
+    Tupdate(xid, rid, range, OPERATION_SET_RANGE);
+    free(range);
+  } compensate;
 
 }
 
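The same shape appears here with a page handle instead of a mutex: the registered cleanup (releasePage()) replaces the explicit call the old code made after Tupdate(). A short sketch, assuming compensate runs the action on both paths as the TsetRange() hunk suggests (setSketch() is hypothetical):

    /* Sketch only; not part of the commit. */
    void setSketch(int xid, recordid rid, Page * p, set_range_t * range) {
      begin_action(releasePage, p) {
        Tupdate(xid, rid, range, OPERATION_SET_RANGE);
        free(range);
      } compensate;                /* releasePage(p) runs here, error or not */
    }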
@@ -107,11 +107,11 @@ void pageWriteLSN(int xid, Page * page, lsn_t lsn) {
   /* unlocked since we're only called by a function that holds the writelock. */
   /*  *(long *)(page->memAddr + START_OF_LSN) = page->LSN; */
 
-  try {
-    if(globalLockManager.writeLockPage) {
-      globalLockManager.writeLockPage(xid, page->id);
+  /* tr y {
+    if(globalLockM anager.writ eLockPage) {
+      globalLock Manager.writeL ockPage(xid, page->id);
     }
-  } end;
+  } en d; */
 
   if(page->LSN < lsn) {
     page->LSN = lsn;
@@ -189,12 +189,12 @@ static int pageAllocUnlocked() {
 
   lastAllocedPage += 1;
 
-  p = loadPage(lastAllocedPage);
+  p = load Page(lastAllocedPage);
+  / ** TODO Incorrect, but this kludge tricks the tests (for now) * /
   while(*page_type_ptr(p) != UNINITIALIZED_PAGE) {
     releasePage(p);
     lastAllocedPage++;
-    p = loadPage(lastAllocedPage);
+    p = load Page(lastAllocedPage);
   }
   releasePage(p);
 
@@ -268,11 +268,9 @@ void writeRecord(int xid, Page * p, lsn_t lsn, recordid rid, const void *dat) {
     return lock_ret;
   } */
 
-  begin_action((void(*)(void*))unlock, p->rwlatch) {
     writelock(p->rwlatch, 225);
     pageWriteLSN(xid, p, lsn);
-  } compensate;
-
+  unlock(p->rwlatch);
 
   if(rid.size > BLOB_THRESHOLD_SIZE) {
     writeBlob(xid, p, lsn, rid, dat);
@@ -391,10 +389,11 @@ void writeRecordUnlocked(int xid, Page * p, lsn_t lsn, recordid rid, const void
 
 
   // Need a writelock so that we can update the lsn.
-  begin_action(unlock, p->rwlatch) {
+
     writelock(p->rwlatch, 225);
     pageWriteLSN(xid, p, lsn);
-  } compensate;
+  unlock(p->rwlatch);
+
 
   if(rid.size > BLOB_THRESHOLD_SIZE) {
     abort();

@@ -214,7 +214,7 @@ void pageDeInit();
 * @param page You must have a writelock on page before calling this function.
 * @param lsn The new lsn of the page.  If the new lsn is less than the page's current lsn, then the page's lsn will not be changed.
 */
-compensated_function void pageWriteLSN(int xid, Page * page, lsn_t lsn);
+void pageWriteLSN(int xid, Page * page, lsn_t lsn);
 
 /**
 * assumes that the page is already loaded in memory.  It takes

@@ -61,17 +61,25 @@ unsigned int calculate_level (unsigned int number_of_pages) {
   return level;
 }
 
-recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount);
+compensated_function recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount);
 /**
    @todo is there a way to implement rallocMany so that it doesn't
    have to physically log pre- and post-images of the allocated space?
 */
-recordid rallocMany(int xid, int recordSize, int recordCount) {
-  int page = TpageAlloc(xid/*, SLOTTED_PAGE*/);
-  return __rallocMany(xid, page, recordSize, recordCount);
+compensated_function recordid rallocMany(int xid, int recordSize, int recordCount) {
+  recordid ret;
+  int page;
+  try_ret(NULLRID) {
+    page = TpageAlloc(xid/*, SLOTTED_PAGE*/);
+  }end_ret(NULLRID);
+  try_ret(NULLRID) {
+    ret = __rallocMany(xid, page, recordSize, recordCount);
+  }end_ret(NULLRID);
+
+  return ret;
 }
 
-recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount) {
+compensated_function recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount) {
 
   /* How many levels of pages do we need? */
 
@@ -111,14 +119,19 @@ recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount)
   }
 
   int newPageCount = (int)ceil((double)recordCount / (double)next_level_records_per_page);
+  int firstChildPage;
 
+  try_ret(NULLRID) {
+    firstChildPage = TpageAllocMany(xid, newPageCount/*, SLOTTED_PAGE*/);/*pageAllocMultiple(newPageCount); */
+  } end_ret(NULLRID);
 
-  int firstChildPage = TpageAllocMany(xid, newPageCount/*, SLOTTED_PAGE*/);/*pageAllocMultiple(newPageCount); */
   int tmpRecordCount = recordCount;
   int thisChildPage = firstChildPage;
 
   while(tmpRecordCount > 0) {
 
+    try_ret(NULLRID) {
       __rallocMany(xid, thisChildPage, recordSize, min(tmpRecordCount, next_level_records_per_page));
+    } end_ret(NULLRID);
     tmpRecordCount -= next_level_records_per_page;
     thisChildPage ++;
 
@@ -160,8 +173,9 @@ recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount)
   }
 
   }
 
+  try_ret(NULLRID) {
     TpageSet(xid, parentPage, p.memAddr);
+  } end_ret(NULLRID);
 
   rid.page = parentPage;
   rid.slot = RECORD_ARRAY;

@@ -47,7 +47,12 @@ compensated_function recordid dereferenceRID(int xid, recordid rid);
 compensated_function static inline recordid dereferenceRIDUnlocked(int xid, recordid rid) {return dereferenceRID(xid,rid);}
 //#define dereferenceRIDUnlocked(y, x) dereferenceRID((y), (x))
 void indirectInitialize(Page * p, int height);
-recordid rallocMany(/*int parentPage, lsn_t lsn,*/int xid, int recordSize, int recordCount);
 
+/* This comment is for check_compensations:
+   compensated_function __rallocMany();
+*/
+
+compensated_function recordid rallocMany(/*int parentPage, lsn_t lsn,*/int xid, int recordSize, int recordCount);
 compensated_function int indirectPageRecordCount(int xid, recordid rid);
 
 END_C_DECLS

@@ -178,7 +178,9 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
   /** @todo is ((unsigned int) foo) == -1 portable?  Gotta love C.*/
 
   if(lastFreepage == -1) {
+    try_ret(NULLRID) {
      lastFreepage = TpageAlloc(xid);
+    } end_ret(NULLRID);
+    try_ret(NULLRID) {
      *pp = loadPage(xid, lastFreepage);
+    } end_ret(NULLRID);
@@ -193,7 +195,9 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
 
   if(slottedFreespace(*pp) < size ) {
     releasePage(*pp);
+    try_ret(NULLRID) {
      lastFreepage = TpageAlloc(xid);
+    } end_ret(NULLRID);
+    try_ret(NULLRID) {
      *pp = loadPage(xid, lastFreepage);
+    } end_ret(NULLRID);
@@ -308,7 +312,7 @@ static void __really_do_ralloc(Page * page, recordid rid) {
 
 }
 
-compensated_function recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
+recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
 
   writelock(page->rwlatch, 376);
 
@@ -353,16 +357,16 @@ compensated_function recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn,
	 (*slot_length_ptr(page, rid.slot) >= PAGE_SIZE));
 
   }
-  begin_action_ret(writeunlock, page->rwlatch, NULLRID) { // lock acquired above.
 
     pageWriteLSN(xid, page, lsn);
-  } compensate_ret(NULLRID);
-
+  writeunlock(page->rwlatch);
 
   return rid;
 }
 
-compensated_function void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
+void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
 
-  begin_action(unlock, page->rwlatch) {
     readlock(page->rwlatch, 443);
 
     *slot_ptr(page, rid.slot) = INVALID_SLOT;
@@ -372,7 +376,7 @@ compensated_function void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recor
 
     pageWriteLSN(xid, page, lsn);
 
-  } compensate;
+  readunlock(page->rwlatch);
 
 }
 

@@ -113,13 +113,13 @@ compensated_function recordid slottedPreRallocFromPage(int xid, long page, long
 * page for this record (though it is possible that we need to compact
 * the page)
 */
-compensated_function recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid);
+recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid);
 /**
 * Mark the space used by a record for reclaimation.
 *
 * @param rid the recordid to be freed.
 */
-compensated_function void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid);
+void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid);
 
 void slottedPageInit();
 void slottedPageDeinit();

@@ -144,7 +144,7 @@ int Tbegin() {
   return XactionTable[index].xid;
 }
 
-void Tupdate(int xid, recordid rid, const void *dat, int op) {
+compensated_function void Tupdate(int xid, recordid rid, const void *dat, int op) {
   LogEntry * e;
   Page * p;
 #ifdef DEBUGGING
@@ -152,7 +152,6 @@ void Tupdate(int xid, recordid rid, const void *dat, int op) {
   assert(numActiveXactions <= MAX_TRANSACTIONS);
   pthread_mutex_unlock(&transactional_2_mutex);
 #endif
-
   p = loadPage(xid, rid.page);
 
   if(*page_type_ptr(p) == INDIRECT_PAGE) {
@@ -170,6 +169,13 @@ void Tupdate(int xid, recordid rid, const void *dat, int op) {
     p = loadPage(xid, rid.page);
   }
 
+  /** @todo For logical undo logs, grabbing a lock makes no sense! */
+  try {
+    if(globalLockManager.writeLockPage) {
+      globalLockManager.writeLockPage(xid, rid.page);
+    }
+  } end;
+
   e = LogUpdate(&XactionTable[xid % MAX_TRANSACTIONS], p, rid, op, dat);
 
   assert(XactionTable[xid % MAX_TRANSACTIONS].prevLSN == e->LSN);
@@ -183,11 +189,20 @@ void Tupdate(int xid, recordid rid, const void *dat, int op) {
 
 }
 
-void alTupdate(int xid, recordid rid, const void *dat, int op) {
+compensated_function void alTupdate(int xid, recordid rid, const void *dat, int op) {
   LogEntry * e;
   Page * p;
 
+  try {
+    if(globalLockManager.writeLockPage) {
+      globalLockManager.writeLockPage(xid, rid.page);
+    }
+  } end;
+
   p = loadPage(xid, rid.page);
 
 
 
   /*  if(*page_type_ptr(p) == INDIRECT_PAGE) {
     releasePage(p);
     rid = dereferenceRID(rid);
@@ -238,20 +253,29 @@ void TreadUnlocked(int xid, recordid rid, void * dat) {
   releasePage(p);
 }
 
-void Tread(int xid, recordid rid, void * dat) {
-  Page * p = loadPage(xid, rid.page);
+compensated_function void Tread(int xid, recordid rid, void * dat) {
+  Page * p;
+  try {
+    p = loadPage(xid, rid.page);
+  } end;
   int page_type = *page_type_ptr(p);
   if(page_type == SLOTTED_PAGE || page_type == FIXED_PAGE || !page_type ) {
 
   } else if(page_type == INDIRECT_PAGE) {
     releasePage(p);
+    try {
      rid = dereferenceRID(xid, rid);
+    } end;
+    try {
      p = loadPage(xid, rid.page);
+    } end;
 
   } else if(page_type == ARRAY_LIST_PAGE) {
     rid = dereferenceArrayListRid(p, rid.slot);
     releasePage(p);
+    try {
      p = loadPage(xid, rid.page);
+    } end;
 
   } else {
     abort();
@@ -277,8 +301,6 @@ int Tcommit(int xid) {
   assert( numActiveXactions >= 0 );
   pthread_mutex_unlock(&transactional_2_mutex);
 
-
-
   return 0;
 }
 

@@ -30,7 +30,7 @@ void * pageWorkerThread(void * j) {
 
     if(rw) {
       // readlock
-      int locked = 0;
+      //      int locked = 0;
+      //      begin_action_ret(NULL,NULL, 0) {
       if(LLADD_DEADLOCK == globalLockManager.readLockPage(xid, m)) {
	k = 0;

@@ -46,10 +46,14 @@ foreach my $i (@source) {
 #	open IN, "utilities/cpp_no_inc <$i |";
     $nest = 0;
     my $num = 0;
+    my $checked_fcns = 0;
     while(my $line = <IN>) {
	$num++;
	my $in = 0;
	if ($line =~ /\bbegin_action(_ret)?\s*\(\b/ || $line =~ /\btry(_ret)?\s*[\(\{]/) {
+	    if(!$nest) {
+		$checked_fcns = 0;
+	    }
	    $nest++;
	    $in = 1;
	}
@@ -58,17 +62,23 @@ foreach my $i (@source) {
	    if($in) {
		warn "$pwd/$i:$num Cannot handle single line compensation checks\n";
	    }
	}
-	if($line !~ /\bcompensated_function\b/) {
-	    foreach my $j (keys %h) {
-		if(($line =~ /$j\s*\(/) && !$nest) {
-		    warn "$pwd/$i:$num Unchecked invocation of $j\n";
-		}
-	    }
	    if ($nest < 0) {
		warn "$pwd/$i:$num Closed compensation check without open.\n";
		$nest = 0;
	    }
+	    if(!$checked_fcns && !$nest) {
+		warn "$pwd/$i:$num Unnecessary compensation check.\n";
+	    }
	}
+	if($line !~ /\bcompensated_function\b/) {
+	    foreach my $j (keys %h) {
+		if(($line =~ /$j\s*\(/)) {
+		    if(!$nest) {
+			warn "$pwd/$i:$num Unchecked invocation of $j\n";
+		    }
+		    $checked_fcns++;
+		}
+	    }
+	}
     }
 
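Given the regexes in the script, the checker's behaviour on C source can be summarized with a sketch: any call to a function declared compensated_function must appear inside a try/begin_action nest, and a nest that checks no such calls now draws an "Unnecessary compensation check" warning. Here f() is a hypothetical function assumed to be in the script's %h table:

    /* Sketch only; not part of the commit. */
    compensated_function void f(int xid);

    void caller(int xid) {
      f(xid);        /* warned: "Unchecked invocation of f" */
      try {
        f(xid);      /* OK: inside a compensation check */
      } end;
      try {
        /* no checked calls here */
      } end;         /* warned: "Unnecessary compensation check." */
    }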