tests compile under g++ now; fix casts, stack allocated arrays, macro issues, some sign problems
parent 3d707c71b6
commit 330e3bf227
26 changed files with 123 additions and 120 deletions
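Most of the hunks below repeat a few patterns: pointers passed around as void * (pthread arguments, pthread_getspecific, hazard_ref, and similar) now get explicit casts so the same C code also compiles as C++; stack-allocated variable-length arrays become allocations through the new stasis_alloca macro; empty macro arguments such as HASH_ENTRY() become HASH_ENTRY(fcn); and some loop counters switch signedness. A minimal sketch of the first two patterns, using hypothetical widget_t/sum_ids names that are not part of the tree:

#include <alloca.h>

/* Same shape as the stasis_alloca macro this commit adds. */
#define stasis_alloca(cnt, typ) ((typ*)alloca((cnt)*sizeof(typ)))

typedef struct { int id; } widget_t;  /* illustrative type only */

/* pthread-style worker: C accepts the implicit void* conversion,
 * but g++ requires the explicit cast. */
static int sum_ids(void *arg, int n) {
  widget_t *items = (widget_t *)arg;     /* cast fix */
  int *scratch = stasis_alloca(n, int);  /* was: int scratch[n]; (a VLA) */
  int total = 0;
  for (int i = 0; i < n; i++) {
    scratch[i] = items[i].id;
    total += scratch[i];
  }
  return total;
}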
@@ -134,6 +134,9 @@ int main(int argc, char* argv[]) {
 MACRO(CREATE_CHECK NAME)
 ADD_EXECUTABLE(${NAME} ${NAME}.c)
 TARGET_LINK_LIBRARIES(${NAME} ${COMMON_LIBRARIES})
+# SET_SOURCE_FILES_PROPERTIES( ${NAME}.c PROPERTIES LANGUAGE "CXX" )
+# SET_TARGET_PROPERTIES(${NAME} PROPERTIES LINKER_LANGUAGE "CXX")
+
 ADD_TEST(${NAME} nice ./${NAME})
 ENDMACRO(CREATE_CHECK)
 
@@ -58,7 +58,7 @@ static pthread_mutex_t mutexes[MUTEX_COUNT];
 
 static pthread_mutex_t xid_table_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t * getMutex(byte * dat, int datLen) {
-return &mutexes[stasis_linear_hash(dat, datLen, MUTEX_BITS, MUTEX_EXT)];
+return &mutexes[HASH_ENTRY(fcn)(dat, datLen, MUTEX_BITS, MUTEX_EXT)];
 }
 
 static pblHashTable_t * xidLockTable;
@@ -205,7 +205,7 @@ static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keyS
 }
 
 recordid bucket = lhh.buckets;
-bucket.slot = stasis_linear_hash(key, keySize, lhh.bits, lhh.nextSplit);
+bucket.slot = HASH_ENTRY(fcn)(key, keySize, lhh.bits, lhh.nextSplit);
 
 int ret;
 
@@ -271,7 +271,7 @@ static int __ThashRemove(int xid, recordid hashHeader, const byte * key, int key
 Tset(xid, hashHeader, &lhh);
 
 recordid bucket = lhh.buckets;
-bucket.slot = stasis_linear_hash(key, keySize, lhh.bits, lhh.nextSplit);
+bucket.slot = HASH_ENTRY(fcn)(key, keySize, lhh.bits, lhh.nextSplit);
 
 if(lhh.keySize == VARIABLE_LENGTH || lhh.valueSize == VARIABLE_LENGTH) {
 recordid bucketList;
@@ -296,7 +296,7 @@ int ThashLookup(int xid, recordid hashHeader, const byte * key, int keySize, byt
 Tread(xid, hashHeader, &lhh);
 
 recordid bucket = lhh.buckets;
-bucket.slot = stasis_linear_hash(key, keySize, lhh.bits, lhh.nextSplit);
+bucket.slot = HASH_ENTRY(fcn)(key, keySize, lhh.bits, lhh.nextSplit);
 
 if(lhh.keySize == VARIABLE_LENGTH || lhh.valueSize == VARIABLE_LENGTH) {
 recordid bucketList;
@@ -349,7 +349,7 @@ static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * l
 byte *key, *value;
 int keySize, valueSize;
 while(TpagedListNext(xid, pit, &key, &keySize, &value, &valueSize)) {
-if(stasis_linear_hash(key, keySize, lhh->bits, lhh->nextSplit) != old_bucket) {
+if(HASH_ENTRY(fcn)(key, keySize, lhh->bits, lhh->nextSplit) != old_bucket) {
 TpagedListRemove(xid, old_bucket_list, key, keySize);
 TpagedListInsert(xid, new_bucket_list, key, keySize, value, valueSize);
 }
@@ -364,7 +364,7 @@ static void ThashSplitBucket(int xid, recordid hashHeader, lladd_hash_header * l
 while(TlinkedListNext(xid, it, &key, &keySize, &value, &valueSize)) {
 assert(valueSize == lhh->valueSize);
 assert(keySize == lhh->keySize);
-if(stasis_linear_hash(key, keySize, lhh->bits, lhh->nextSplit) != old_bucket) {
+if(HASH_ENTRY(fcn)(key, keySize, lhh->bits, lhh->nextSplit) != old_bucket) {
 TlinkedListRemove(xid, old_bucket_rid, key, keySize);
 TlinkedListInsert(xid, new_bucket_rid, key, keySize, value, valueSize);
 }
@@ -137,7 +137,7 @@ static void rehash(int xid, recordid hashRid, pageid_t next_split, pageid_t i, u
 
 uint64_t old_hash;
 uint64_t new_hash =
-2 + stasis_linear_hash(A_contents+1, keySize, i, UINT_MAX);
+2 + HASH_ENTRY(fcn)(A_contents+1, keySize, i, UINT_MAX);
 
 while(new_hash != next_split) {
 // Need a record in A that belongs in the first bucket...
@@ -188,7 +188,7 @@ static void rehash(int xid, recordid hashRid, pageid_t next_split, pageid_t i, u
 Tset(xid, A, A_contents);
 Tdealloc(xid, oldANext);
 
-new_hash = stasis_linear_hash(A_contents+1, keySize, i, UINT_MAX) + 2;
+new_hash = HASH_ENTRY(fcn)(A_contents+1, keySize, i, UINT_MAX) + 2;
 }
 
 B = A_contents->next;
@@ -198,8 +198,8 @@ static void rehash(int xid, recordid hashRid, pageid_t next_split, pageid_t i, u
 Tread(xid, B, B_contents);
 C = B_contents->next;
 
-old_hash = stasis_linear_hash(B_contents+1, keySize, i-1, UINT_MAX) + 2;
-new_hash = stasis_linear_hash(B_contents+1, keySize, i, UINT_MAX) + 2;
+old_hash = HASH_ENTRY(fcn)(B_contents+1, keySize, i-1, UINT_MAX) + 2;
+new_hash = HASH_ENTRY(fcn)(B_contents+1, keySize, i, UINT_MAX) + 2;
 
 assert(next_split == old_hash);
 assert(new_hash == old_hash || new_hash == old_hash + stasis_util_two_to_the(i-1));
@@ -404,7 +404,7 @@ void TnaiveHashInsert(int xid, recordid hashRid,
 recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
 
 int bucket =
-2 + stasis_linear_hash(key, keySize, headerHashBits, headerNextSplit - 2);
+2 + HASH_ENTRY(fcn)(key, keySize, headerHashBits, headerNextSplit - 2);
 
 hashEntry * e = stasis_calloc_trailing_array(hashEntry, keySize + valSize);
 memcpy(e+1, key, keySize);
@@ -427,7 +427,7 @@ int TnaiveHashDelete(int xid, recordid hashRid,
 void * key, int keySize, int valSize) {
 recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
 
-int bucket_number = stasis_linear_hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
+int bucket_number = HASH_ENTRY(fcn)(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
 recordid deleteMe;
 hashRid.slot = bucket_number;
 
@@ -471,7 +471,7 @@ int TnaiveHashClose(int xid, recordid hashRid) {
 
 int TnaiveHashLookup(int xid, recordid hashRid, void * key, int keySize, void * buf, int valSize) {
 recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
-int bucket_number = stasis_linear_hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
+int bucket_number = HASH_ENTRY(fcn)(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
 int ret = findInBucket(xid, hashRid, bucket_number, key, keySize, buf, valSize);
 return ret;
 }
@@ -178,7 +178,7 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {
 struct LH_ENTRY(pair_t) * splitBucketRoot =
 &(table->bucketList[splitBucket]);
 while(splitBucketRoot->key &&
-(HASH_ENTRY()(splitBucketRoot->key, splitBucketRoot->keyLength,
+(HASH_ENTRY(fcn)(splitBucketRoot->key, splitBucketRoot->keyLength,
 table->bucketListBits, table->bucketListNextExtension) ==
 newBucket)) {
 insertIntoLinkedList(table, newBucket,
@@ -188,7 +188,7 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {
 splitBucketRoot->key, splitBucketRoot->keyLength);
 }
 if(splitBucketRoot->key) {
-assert(HASH_ENTRY()(splitBucketRoot->key, splitBucketRoot->keyLength,
+assert(HASH_ENTRY(fcn)(splitBucketRoot->key, splitBucketRoot->keyLength,
 table->bucketListBits, table->bucketListNextExtension)
 == splitBucket);
 } else {
@@ -200,7 +200,7 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {
 // the list doesn't change its successor.
 struct LH_ENTRY(pair_t) * newNext = next->next;
 
-uint64_t hashCode = HASH_ENTRY()(next->key, next->keyLength,
+uint64_t hashCode = HASH_ENTRY(fcn)(next->key, next->keyLength,
 table->bucketListBits,
 table->bucketListNextExtension);
 
@@ -239,7 +239,7 @@ LH_ENTRY(value_t) * LH_ENTRY(insert) (struct LH_ENTRY(table) * table,
 #ifdef NAIVE_LOCKING
 pthread_mutex_lock(&(table->lock));
 #endif
-intptr_t bucket = HASH_ENTRY()(key, len,
+intptr_t bucket = HASH_ENTRY(fcn)(key, len,
 table->bucketListBits, table->bucketListNextExtension);
 struct LH_ENTRY(pair_t) * thePair = 0;
 struct LH_ENTRY(pair_t) * junk;
@@ -291,7 +291,7 @@ LH_ENTRY(value_t) * LH_ENTRY(remove) (struct LH_ENTRY(table) * table,
 #ifdef NAIVE_LOCKING
 pthread_mutex_lock(&(table->lock));
 #endif
-intptr_t bucket = HASH_ENTRY()(key, len,
+intptr_t bucket = HASH_ENTRY(fcn)(key, len,
 table->bucketListBits,
 table->bucketListNextExtension);
 
@@ -308,7 +308,7 @@ LH_ENTRY(value_t) * LH_ENTRY(find)(struct LH_ENTRY(table) * table,
 #ifdef NAIVE_LOCKING
 pthread_mutex_lock(&(table->lock));
 #endif
-intptr_t bucket = HASH_ENTRY()(key, len,
+intptr_t bucket = HASH_ENTRY(fcn)(key, len,
 table->bucketListBits,
 table->bucketListNextExtension);
 struct LH_ENTRY(pair_t) * predecessor;
@@ -17,7 +17,7 @@ typedef struct stasis_btree_data_page_header {
 PAGE rightSibling;
 PAGE leftSibling;
 pthread_rwlock_t latch;
-};
+} stasis_btree_data_page_header;
 
 static inline byte metadata_is_leaf(metadata_t m) {
 return m & 0x1;
@@ -46,7 +46,7 @@ typedef enum {
 } color_t;
 
 static inline color_t leaf_metadata_get_color(metadata_t m) {
-return (m & (0x4 | 0x8)) >> 2;
+return (color_t)((m & (0x4 | 0x8)) >> 2);
 }
 static inline metadata_t leaf_metadata_set_color(metadata_t m, color_t c) {
 return (m & ~(0x4 | 0x8)) | (c << 2);
@@ -51,8 +51,8 @@ static inline pthread_mutex_t * stasis_util_skiplist_get_forward_mutex(
 return (pthread_mutex_t*)(stasis_util_skiplist_get_forward(x,n)+1);
 }
 int stasis_util_skiplist_node_finalize(void * pp, void * conf) {
-stasis_skiplist_node_t * p = pp;
-stasis_skiplist_t * list = conf;
+stasis_skiplist_node_t * p = (stasis_skiplist_node_t *)pp;
+stasis_skiplist_t * list = (stasis_skiplist_t *)conf;
 if(p->refcount == 0) {
 void * oldKey = (void*)p->key; // do this early to find races.
 for(int i = 1; i <= p->level; i++) {
@@ -78,7 +78,7 @@ int stasis_util_skiplist_default_key_finalize(void * p, void * ignored) {
 
 
 static inline int stasis_util_skiplist_random_level(pthread_key_t k) {
-kiss_table_t * kiss = pthread_getspecific(k);
+kiss_table_t * kiss = (kiss_table_t *)pthread_getspecific(k);
 if(kiss == 0) {
 kiss = stasis_alloc(kiss_table_t);
 stasis_util_random_kiss_settable(kiss,
@@ -153,7 +153,7 @@ static inline void stasis_util_skiplist_deinit(stasis_skiplist_t * list) {
 hazard_deinit(list->ret_hazard);
 pthread_mutex_destroy(&list->levelHint_mut);
 free((void*)list->header);
-kiss_table_t * kiss = pthread_getspecific(list->k);
+kiss_table_t * kiss = (kiss_table_t *)pthread_getspecific(list->k);
 if(kiss) {
 stasis_util_skiplist_cleanup_tls(kiss);
 pthread_setspecific(list->k, 0);
@@ -166,12 +166,12 @@ static inline void * stasis_util_skiplist_search(stasis_skiplist_t * list, void
 // the = 0 here are to silence GCC -O3 warnings.
 stasis_skiplist_node_t *x, *y = 0;
 int cmp = 0;
-x = hazard_set(list->h,0,(void*)list->header);
+x = (stasis_skiplist_node_t *)hazard_set(list->h,0,(void*)list->header);
 for(int i = list->levelHint; i > 0; i--) {
-y = hazard_ref(list->h,1,stasis_util_skiplist_get_forward(x, i));
+y = (stasis_skiplist_node_t *)hazard_ref(list->h,1,stasis_util_skiplist_get_forward(x, i));
 while((cmp = stasis_util_skiplist_cmp_helper(list, y, searchKey)) < 0) {
-x = hazard_set(list->h,0,(void*)y);
-y = hazard_ref(list->h,1,stasis_util_skiplist_get_forward(x, i));
+x = (stasis_skiplist_node_t *)hazard_set(list->h,0,(void*)y);
+y = (stasis_skiplist_node_t *)hazard_ref(list->h,1,stasis_util_skiplist_get_forward(x, i));
 }
 }
 void * ret;
@@ -188,20 +188,20 @@ static inline void * stasis_util_skiplist_search(stasis_skiplist_t * list, void
 static inline stasis_skiplist_node_t * stasis_util_skiplist_get_lock(
 stasis_skiplist_t * list, stasis_skiplist_node_t * x, void * searchKey, int i) {
 stasis_skiplist_node_t * z
-= hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
+= (stasis_skiplist_node_t *)hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
 while(stasis_util_skiplist_cmp_helper(list, z, searchKey) < 0) {
-x = hazard_set(list->h, 0, (void*)z);
-z = hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
+x = (stasis_skiplist_node_t *)hazard_set(list->h, 0, (void*)z);
+z = (stasis_skiplist_node_t *)hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
 }
 pthread_mutex_lock(stasis_util_skiplist_get_forward_mutex(x, i));
-z = hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
+z = (stasis_skiplist_node_t *)hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
 while(stasis_util_skiplist_cmp_helper(list, z, searchKey) < 0) {
 // Should lock of z be here?
 pthread_mutex_unlock(stasis_util_skiplist_get_forward_mutex(x, i));
-x = hazard_set(list->h, 0, (void*)z);
+x = (stasis_skiplist_node_t *)hazard_set(list->h, 0, (void*)z);
 // Note: lock of z was here (and it was called x)
 pthread_mutex_lock(stasis_util_skiplist_get_forward_mutex(x, i));
-z = hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
+z = (stasis_skiplist_node_t *)hazard_ref(list->h, 2, stasis_util_skiplist_get_forward(x, i));
 }
 stasis_util_skiplist_assert(stasis_util_skiplist_cmp_helper2(list, x, (stasis_skiplist_node_t*)*stasis_util_skiplist_get_forward(x, i)) < 0);
 hazard_release(list->h, 2);
@@ -212,32 +212,32 @@ static inline stasis_skiplist_node_t * stasis_util_skiplist_get_lock(
 * @return the old value or null if there was no such value.
 */
 static inline void * stasis_util_skiplist_insert(stasis_skiplist_t * list, void * searchKey) {
-stasis_skiplist_node_t * update[list->levelCap+1];
+stasis_skiplist_node_t ** update = stasis_alloca(list->levelCap+1, stasis_skiplist_node_t*);
 stasis_skiplist_node_t *x, *y;
 IN:
-x = hazard_set(list->h, 0, (void*)list->header);
+x = (stasis_skiplist_node_t *)hazard_set(list->h, 0, (void*)list->header);
 int L = list->levelHint;
 // for i = L downto 1
 int i;
 for(i = L+1; i > 1;) {
 i--;
-y = hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
+y = (stasis_skiplist_node_t *)hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
 while(stasis_util_skiplist_cmp_helper(list, y, searchKey) < 0) {
-x = hazard_set(list->h, 0, (void*)y);
-y = hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
+x = (stasis_skiplist_node_t *)hazard_set(list->h, 0, (void*)y);
+y = (stasis_skiplist_node_t *)hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
 }
-update[i] = hazard_set(list->h, STASIS_SKIPLIST_HP_COUNT+(L-i), x);
+update[i] = (stasis_skiplist_node_t *)hazard_set(list->h, STASIS_SKIPLIST_HP_COUNT+(L-i), x);
 }
 // update[L..1] is set.
 // h [HP_COUNT+[0..L-1] is set.
 // Note get_lock grabs the hazard pointer for x.
 x = stasis_util_skiplist_get_lock(list, x, searchKey, 1);
-y = hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, 1));
+y = (stasis_skiplist_node_t *)hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, 1));
 if(stasis_util_skiplist_cmp_helper(list, y, searchKey) == 0) {
 pthread_mutex_unlock(stasis_util_skiplist_get_forward_mutex(x, 1));
 pthread_mutex_lock(&y->level_mut);
 
-x = hazard_ref(list->h, 0, stasis_util_skiplist_get_forward(y, 1));
+x = (stasis_skiplist_node_t *)hazard_ref(list->h, 0, stasis_util_skiplist_get_forward(y, 1));
 int isGarbage = stasis_util_skiplist_cmp_helper(list, x, searchKey) < 0;
 if(!isGarbage) {
 void * oldKey;
@@ -262,10 +262,10 @@ IN:
 }
 }
 hazard_ptr newnode = stasis_util_skiplist_make_node(stasis_util_skiplist_random_level(list->k), searchKey);
-y = hazard_set(list->h, 1, (void*)newnode);
+y = (stasis_skiplist_node_t *)hazard_set(list->h, 1, (void*)newnode);
 pthread_mutex_lock(&y->level_mut);
 for(int i = L+1; i <= y->level; i++) {
-update[i] = (void*)list->header;
+update[i] = (stasis_skiplist_node_t *)list->header;
 }
 // update[L+1..y->level] is set
 for(int i = 1; i <= y->level; i++) {
@@ -279,10 +279,10 @@ IN:
 pthread_mutex_unlock(&y->level_mut);
 
 int L2 = list->levelHint;
-if(L2 < list->levelCap && *stasis_util_skiplist_get_forward((void*)list->header, L2+1) != 0) {
+if(L2 < list->levelCap && *stasis_util_skiplist_get_forward((stasis_skiplist_node_t *)list->header, L2+1) != 0) {
 if(pthread_mutex_trylock(&list->levelHint_mut) == 0) {
 while(list->levelHint < list->levelCap &&
-*stasis_util_skiplist_get_forward((void*)list->header, list->levelHint+1) != 0) {
+*stasis_util_skiplist_get_forward((stasis_skiplist_node_t *)list->header, list->levelHint+1) != 0) {
 list->levelHint = list->levelHint+1; // XXX atomics?
 }
 pthread_mutex_unlock(&list->levelHint_mut);
@@ -301,23 +301,23 @@ IN:
 * @return The old value, or null.
 */
 static inline void * stasis_util_skiplist_delete(stasis_skiplist_t * list, void * searchKey) {
-stasis_skiplist_node_t * update[list->levelCap+1];
+stasis_skiplist_node_t ** update = stasis_alloca(list->levelCap+1, stasis_skiplist_node_t*);
 stasis_skiplist_node_t *x, *y;
-x = hazard_set(list->h, 0, (void*)list->header);
+x = (stasis_skiplist_node_t *)hazard_set(list->h, 0, (void*)list->header);
 int L = list->levelHint;
 // for i = L downto 1
 int i;
 for(i = L+1; i > 1;) {
 i--; // decrement after check, so that i is 1 at the end of the loop.
-y = hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
+y = (stasis_skiplist_node_t *)hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
 while(stasis_util_skiplist_cmp_helper(list, y, searchKey) < 0) {
-x = hazard_set(list->h, 0, (void*)y);
-y = hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
+x = (stasis_skiplist_node_t *)hazard_set(list->h, 0, (void*)y);
+y = (stasis_skiplist_node_t *)hazard_ref(list->h, 1, stasis_util_skiplist_get_forward(x, i));
 }
-update[i] = hazard_set(list->h, STASIS_SKIPLIST_HP_COUNT+(L-i), x);
+update[i] = (stasis_skiplist_node_t *)hazard_set(list->h, STASIS_SKIPLIST_HP_COUNT+(L-i), x);
 }
 // h[HP_COUNT+[0..L-1] is set
-y = hazard_set(list->h, 1, (void*)x);
+y = (stasis_skiplist_node_t *)hazard_set(list->h, 1, (void*)x);
 int isGarbage = 0;
 int first = 1;
 // do ... until equal and not garbage
@@ -325,14 +325,14 @@ static inline void * stasis_util_skiplist_delete(stasis_skiplist_t * list, void
 // Note: it is unsafe to copy y->i directly into y, since doing so releases
 // the hazard pointer in race. Fortunately, we don't need x for anything
 // until we overwrite it immediately below.
-x = hazard_ref(list->h, 0, stasis_util_skiplist_get_forward(y, i));
+x = (stasis_skiplist_node_t *)hazard_ref(list->h, 0, stasis_util_skiplist_get_forward(y, i));
 if(first) {
 first = 0;
 } else {
 // This unlock was not in the pseudocode, but seems to be necessary...
 pthread_mutex_unlock(&y->level_mut);
 }
-y = hazard_set(list->h, 1, x);
+y = (stasis_skiplist_node_t *)hazard_set(list->h, 1, x);
 if(stasis_util_skiplist_cmp_helper(list, y, searchKey) > 0) {
 hazard_release(list->ret_hazard, 0);
 hazard_release(list->h, 0);
@@ -345,12 +345,12 @@ static inline void * stasis_util_skiplist_delete(stasis_skiplist_t * list, void
 return NULL;
 }
 pthread_mutex_lock(&y->level_mut);
-x = hazard_ref(list->h, 0, stasis_util_skiplist_get_forward(y, i));
+x = (stasis_skiplist_node_t *)hazard_ref(list->h, 0, stasis_util_skiplist_get_forward(y, i));
 // Note: this is a > in pseudocode, which lets equal nodes link back into themselves.
 isGarbage = stasis_util_skiplist_cmp_helper2(list, y, x) > 0;
 // pseudocode would unlock if garbage here. Moved unlock to top of loop.
 } while(!(!isGarbage && stasis_util_skiplist_cmp_helper(list, y, searchKey) == 0));
-for(int i = L+1; i <= y->level; i++) { update[i] = (void*)list->header; }
+for(int i = L+1; i <= y->level; i++) { update[i] = (stasis_skiplist_node_t *)list->header; }
 for(int i = y->level; i > 0; i--) {
 x = stasis_util_skiplist_get_lock(list, update[i], searchKey, i);
 pthread_mutex_lock(stasis_util_skiplist_get_forward_mutex(y, i));
@@ -366,9 +366,9 @@ static inline void * stasis_util_skiplist_delete(stasis_skiplist_t * list, void
 void * oldKey = hazard_ref(list->ret_hazard, 0, &(y->key));
 pthread_mutex_unlock(&y->level_mut);
 int L2 = list->levelHint;
-if(L2 > 1 && *stasis_util_skiplist_get_forward((void*)list->header, L2) == 0) {
+if(L2 > 1 && *stasis_util_skiplist_get_forward((stasis_skiplist_node_t *)list->header, L2) == 0) {
 if(pthread_mutex_trylock(&list->levelHint_mut) == 0) {
-while(list->levelHint > 1 && (stasis_skiplist_node_t*)*stasis_util_skiplist_get_forward((void*)list->header, list->levelHint) == 0) {
+while(list->levelHint > 1 && (stasis_skiplist_node_t*)*stasis_util_skiplist_get_forward((stasis_skiplist_node_t *)list->header, list->levelHint) == 0) {
 list->levelHint = list->levelHint - 1;
 }
 pthread_mutex_unlock(&list->levelHint_mut);
@@ -28,7 +28,7 @@ static inline void HASH_ENTRY(_get_size_params)(uint64_t desiredSize,
 /**
 @todo despite it's interface, stasis_linear_hash can't return values > 2^32!
 */
-static inline uint64_t HASH_ENTRY()(const void * val, uint64_t val_length,
+static inline uint64_t HASH_ENTRY(fcn)(const void * val, uint64_t val_length,
 unsigned char tableBits, uint64_t nextExtension) {
 // Calculate the hash value as it was before this round of splitting.
 unsigned int oldTableLength = stasis_util_two_to_the(tableBits - 1);
@@ -39,7 +39,7 @@ static int intptr_cmp(const void * ap, const void *bp) {
 }
 static inline void hazard_scan(hazard_t * h, hazard_ptr_rec_t * rec) {
 if(rec == NULL) {
-rec = pthread_getspecific(h->hp);
+rec = (hazard_ptr_rec_t*)pthread_getspecific(h->hp);
 }
 if(rec == NULL) { return; }
 qsort(rec->rlist, rec->rlist_len, sizeof(void*), intptr_cmp);
@@ -85,7 +85,7 @@ static inline void hazard_scan(hazard_t * h, hazard_ptr_rec_t * rec) {
 free(ptrs);
 }
 static void hazard_deinit_thread(void * p) {
-hazard_ptr_rec_t * rec = p;
+hazard_ptr_rec_t * rec = (hazard_ptr_rec_t*)p;
 if(rec != NULL) {
 while(rec->rlist_len != 0) {
 hazard_scan(rec->h, rec);
@@ -140,7 +140,7 @@ static inline hazard_t* hazard_init(int hp_slots, int stack_start, int r_slots,
 return ret;
 }
 static inline hazard_ptr_rec_t * hazard_ensure_tls(hazard_t * h) {
-hazard_ptr_rec_t * rec = pthread_getspecific(h->hp);
+hazard_ptr_rec_t * rec = (hazard_ptr_rec_t*)pthread_getspecific(h->hp);
 if(rec == NULL) {
 rec = stasis_alloc(hazard_ptr_rec_t);
 rec->hp = stasis_calloc(h->num_slots, hazard_ptr);
@@ -156,7 +156,7 @@ static inline hazard_ptr_rec_t * hazard_ensure_tls(hazard_t * h) {
 return rec;
 }
 static inline void hazard_deinit(hazard_t * h) {
-hazard_ptr_rec_t * rec = pthread_getspecific(h->hp);
+hazard_ptr_rec_t * rec = (hazard_ptr_rec_t*)pthread_getspecific(h->hp);
 hazard_deinit_thread(rec);
 pthread_key_delete(h->hp);
 assert(h->tls_list == NULL);
@@ -10,6 +10,7 @@
 
 #include <stasis/common.h>
 
+#define stasis_alloca(cnt, typ) ((typ*)alloca((cnt)*sizeof(typ)))
 #define stasis_alloc(typ) ((typ*)malloc(sizeof(typ)))
 #define stasis_malloc(cnt, typ) ((typ*)malloc((cnt)*sizeof(typ)))
 #define stasis_malloc_trailing_array(typ, array_sz) ((typ*)malloc(sizeof(typ)+(array_sz)))
@@ -63,7 +63,7 @@ START_TEST(boundedLogTest) {
 pageid_t region_start = TregionAlloc(xid, NUM_XACTS, 0);
 Tcommit(xid);
 
-for(uint64_t i = 0; i < NUM_XACTS; i++) {
+for(int64_t i = 0; i < NUM_XACTS; i++) {
 xid = Tbegin();
 TinitializeFixedPage(xid, region_start + i, sizeof(uint64_t));
 recordid rid = {region_start + i, 0, sizeof(uint64_t)};
@@ -86,7 +86,7 @@ START_TEST(boundedLogConcurrentTest) {
 pageid_t region_start = TregionAlloc(xid, NUM_XACTS, 0);
 Tcommit(xid);
 
-for(uint64_t i = 0; i < NUM_XACTS; i++) {
+for(int64_t i = 0; i < NUM_XACTS; i++) {
 int xids[NUM_CONCURRENT_XACTS];
 for(int j = 0; j < NUM_CONCURRENT_XACTS; j++) {
 xids[j] = Tbegin();
@@ -65,7 +65,7 @@ void initializePages(void) {
 stasis_record_alloc_done(-1, p, rid);
 int * buf = (int*)stasis_record_write_begin(-1, p, rid);
 *buf = i;
-stasis_record_write_done(-1,p,rid,(void*)buf);
+stasis_record_write_done(-1,p,rid,(byte*)buf);
 stasis_page_lsn_write(-1, p, 0);
 unlock(p->rwlatch);
 releasePage(p);
@@ -344,7 +344,7 @@ static void stalePinTestImpl(stasis_buffer_manager_t * (*fact)(stasis_log_t*, st
 
 Tinit();
 
-Page * p[stasis_buffer_manager_size-1];
+Page ** p = stasis_alloca(stasis_buffer_manager_size-1, Page*);
 for(int i = 0; i < stasis_buffer_manager_size-2; i++) {
 p[i] = loadUninitializedPage(-1, i);
 }
@@ -4,7 +4,6 @@
 * Created on: Dec 22, 2011
 * Author: sears
 */
-#define _GNU_SOURCE
 #include "../check_includes.h"
 
 #include <stasis/util/concurrentHash.h>
@@ -26,7 +25,7 @@ START_TEST(metadataBitTest) {
 metadata_t m = 0;
 byte isLeaf = 0;
 byte balanced = 0;
-byte color = 0;
+color_t color = TEMP;
 byte level = 0;
 for(int i = 0; i < NUM_ITERS; i++) {
 switch(stasis_util_random64(3)) {
@@ -61,7 +60,7 @@ START_TEST(metadataBitTest) {
 case 2: {
 if(isLeaf) { // color
 assert(color == leaf_metadata_get_color(m));
-color = stasis_util_random64(3);
+color = (color_t)stasis_util_random64(3);
 m = leaf_metadata_set_color(m, color);
 } else { // level
 assert(level == index_metadata_get_level(m));
@@ -81,7 +81,7 @@ START_TEST(ringBufferSmokeTest) {
 
 #define PROD_CONS_SIZE (100L * 1024L * 1024L)
 static void * consumerWorker(void * arg) {
-stasis_ringbuffer_t * ring = arg;
+stasis_ringbuffer_t * ring = (stasis_ringbuffer_t *)arg;
 lsn_t cursor = 0;
 while(cursor < PROD_CONS_SIZE) {
 lsn_t rnd_size = stasis_util_random64(2048);
@@ -97,7 +97,7 @@ static void * consumerWorker(void * arg) {
 return 0;
 }
 static void * producerWorker(void * arg) {
-stasis_ringbuffer_t * ring = arg;
+stasis_ringbuffer_t * ring = (stasis_ringbuffer_t *)arg;
 lsn_t cursor = 0;
 while(cursor < PROD_CONS_SIZE) {
 int rnd_size = stasis_util_random64(2048);
@@ -134,7 +134,7 @@ typedef struct {
 stasis_ringbuffer_t * ring;
 } arg;
 static void * concurrentReader(void * argp) {
-arg * a = argp;
+arg * a = (arg*)argp;
 stasis_ringbuffer_t * ring = a->ring;
 lsn_t cursor = 0;
 lsn_t rd_handle;
@@ -155,7 +155,7 @@ static void * concurrentReader(void * argp) {
 return 0;
 }
 static void * concurrentWriter(void * argp) {
-arg * a = argp;
+arg * a = (arg*)argp;
 stasis_ringbuffer_t * ring = a->ring;
 lsn_t cursor = 0;
 lsn_t wr_handle;
@@ -44,21 +44,21 @@ int num_threads = 4;
 int concurrent = 0;
 stasis_skiplist_t * list;
 void * worker(void* p) {
-intptr_t * keys = p;
+intptr_t * keys = (intptr_t*)p;
 intptr_t collisions = 0;
 for(int i = 0; i < num_keys; i++) {
-char * ret = stasis_util_skiplist_insert(list, key_dup(keys[i]));
+char * ret = (char*)stasis_util_skiplist_insert(list, key_dup(keys[i]));
 if(ret != NULL) {
 assert(!stasis_util_skiplist_cmp(ret, &keys[i]));
 collisions++;
 }
 }
 for(int i = 0; i < num_keys; i++) {
-char * ret = stasis_util_skiplist_search(list, &keys[i]);
+char * ret = (char*)stasis_util_skiplist_search(list, &keys[i]);
 if(!concurrent) assert(!stasis_util_skiplist_cmp(ret, &keys[i]));
 }
 for(int i = 0; i < num_keys; i++) {
-char * ret = stasis_util_skiplist_delete(list, &keys[i]);
+char * ret = (char*)stasis_util_skiplist_delete(list, &keys[i]);
 if(ret == NULL) {
 collisions--;
 }
@@ -77,7 +77,7 @@ START_TEST(concurrentSkipList_smokeTest) {
 int err = asprintf(&keys[i], "%d", (int)stasis_util_random64(2*num_keys));
 (void) err;
 #else
-keys[i] = (void*)(1+stasis_util_random64(2*num_keys));
+keys[i] = (char*)(1+stasis_util_random64(2*num_keys));
 #endif
 }
 printf("Initted\n");
@@ -112,7 +112,7 @@ START_TEST(concurrentSkipList_concurrentTest) {
 int err = asprintf(&keys[i], "%d", (int)stasis_util_random64(2*num_keys));
 (void) err;
 #else
-keys[j][i] = (void*)(1+stasis_util_random64(2*num_keys));
+keys[j][i] = (char*)(1+stasis_util_random64(2*num_keys));
 #endif
 }
 }
@@ -128,7 +128,7 @@ START_TEST(concurrentSkipList_concurrentTest) {
 }
 for(int j = 0; j < num_threads; j++) {
 intptr_t ret;
-pthread_join(threads[j], (void*)&ret);
+pthread_join(threads[j], (void**)&ret);
 collisions += ret;
 #ifdef STRINGS
 for(int i = 0; i < num_keys; i++) {
@@ -168,7 +168,7 @@ void * worker2(void * p) {
 }
 START_TEST(concurrentSkipList_concurrentRandom) {
 list = stasis_util_skiplist_init(stasis_util_skiplist_cmp, 0);
-pthread_t thread[num_threads];
+pthread_t * thread = stasis_alloca(num_threads, pthread_t);
 for(int i = 0; i < num_threads; i++) {
 pthread_create(&thread[i], 0, worker2, 0);
 }
@@ -74,7 +74,7 @@ terms specified in this license.
 #define NUM_PAGES 100
 #endif
 void * worker(void*arg) {
-stasis_dirty_page_table_t * dpt = stasis_runtime_dirty_page_table();
+stasis_dirty_page_table_t * dpt = (stasis_dirty_page_table_t *)stasis_runtime_dirty_page_table();
 for(int i = 0; i < NUM_STEPS; i++) {
 pageid_t page = stasis_util_random64(NUM_PAGES);
 Page * p = loadPage(-1, page);
@@ -76,7 +76,7 @@ START_TEST(filePoolDirTest){
 last_lsn = e->LSN;
 log->write_entry(log, e);
 log->write_entry_done(log, e);
-if(!(i & 15)) { log->force_tail(log, 0); } // xxx
+if(!(i & 15)) { log->force_tail(log, LOG_FORCE_COMMIT); } // xxx
 }
 
 log->close(log);
@@ -72,8 +72,8 @@ START_TEST(hazard_smokeTest) {
 char * b = stasis_malloc(1, char);
 *a = 0;
 *b = 1;
-char * ap = hazard_ref(h, 0, (hazard_ptr*)&a);
-char * bp = hazard_ref(h, 1, (hazard_ptr*)&b);
+char * ap = (char*)hazard_ref(h, 0, (hazard_ptr*)&a);
+char * bp = (char*)hazard_ref(h, 1, (hazard_ptr*)&b);
 hazard_free(h, ap);
 hazard_free(h, bp);
 hazard_scan(h,0);
@@ -93,7 +93,7 @@ START_TEST(hazard_smokeTest) {
 hazard_ptr* slots;
 pthread_mutex_t* muts;
 void * hazard_worker(void * hp) {
-hazard_t * h = hp;
+hazard_t * h = (hazard_t*)hp;
 for(int i = 0; i < NUM_OPS; i++) {
 int ptr_off = (int)stasis_util_random64(NUM_SLOTS);
 void * p = hazard_ref(h, 0, &slots[ptr_off]);
@@ -267,7 +267,7 @@ recordid makekey(int thread, int i) {
 return ret;
 }
 void * worker(void* arg) {
-linear_hash_worker_args * args = arg;
+linear_hash_worker_args * args = (linear_hash_worker_args *)arg;
 int thread = args->thread;
 recordid hash = args->rid;
 
@@ -315,7 +315,7 @@ START_TEST(linearHashNTAThreadedTest) {
 recordid rid = ThashCreate(xid, sizeof(recordid), sizeof(int));
 int i;
 Tcommit(xid);
-pthread_t threads[NUM_THREADS];
+pthread_t *threads = stasis_alloca(NUM_THREADS, pthread_t);
 for(i = 0; i < NUM_THREADS; i++) {
 linear_hash_worker_args * args = stasis_alloc(linear_hash_worker_args);
 args->thread = i;
@@ -50,7 +50,7 @@ terms specified in this license.
 START_TEST(rawLogEntryAlloc)
 {
 Tinit();
-stasis_log_t *l = stasis_log();
+stasis_log_t *l = (stasis_log_t *)stasis_log();
 LogEntry * log = allocCommonLogEntry(l, 200, 1, XABORT);
 assert(log->prevLSN == 200);
 assert(log->xid == 1);
@@ -78,7 +78,7 @@ START_TEST(updateLogEntryAlloc)
 LogEntry * log;
 
 Tinit(); /* Needed because it sets up the operations table. */
-stasis_log_t *l = stasis_log();
+stasis_log_t *l = (stasis_log_t *)stasis_log();
 
 log = allocUpdateLogEntry(l, 200, 1, OPERATION_SET,
 rid.page, 3*sizeof(char));
@@ -113,7 +113,7 @@ START_TEST(updateLogEntryAllocNoExtras)
 
 recordid rid = { 3 , 4, sizeof(int)*3 };
 
-stasis_log_t *l = stasis_log();
+stasis_log_t *l = (stasis_log_t *)stasis_log();
 LogEntry * log = allocUpdateLogEntry(l, 200, 1, OPERATION_SET,
 rid.page, 0);
 assert(log->prevLSN == 200);
@@ -63,7 +63,7 @@ terms specified in this license.
 #define LOG_NAME "check_logWriter.log"
 
 LogEntry * dupLogEntry(stasis_log_t * log, const LogEntry *e) {
-LogEntry * ret = malloc(sizeofLogEntry(log, e));
+LogEntry * ret = (LogEntry *)malloc(sizeofLogEntry(log, e));
 memcpy(ret,e,sizeofLogEntry(log, e));
 return ret;
 }
@@ -77,7 +77,7 @@ static stasis_log_t * setup_log(void) {
 Tinit();
 lsn_t firstLSN = -1;
 int first = 1;
-stasis_log_t * stasis_log_file = stasis_log();
+stasis_log_t * stasis_log_file = (stasis_log_t *)stasis_log();
 for(i = 0 ; i < 1000; i++) {
 lsn_t test = stasis_log_file->next_available_lsn(stasis_log_file);
 
@@ -239,13 +239,13 @@ static void loggerTruncate(int logType) {
 le = nextInLog(lh);
 }
 
-LogEntry * copy = malloc(sizeofLogEntry(stasis_log_file, le));
+LogEntry * copy = (LogEntry *)malloc(sizeofLogEntry(stasis_log_file, le));
 memcpy(copy, le, sizeofLogEntry(stasis_log_file, le));
 le = copy;
 
 le2 = nextInLog(lh);
 
-copy = malloc(sizeofLogEntry(stasis_log_file, le2));
+copy = (LogEntry *)malloc(sizeofLogEntry(stasis_log_file, le2));
 memcpy(copy, le2, sizeofLogEntry(stasis_log_file, le2));
 le2 = copy;
 
@@ -255,7 +255,7 @@ static void loggerTruncate(int logType) {
 le3 = nextInLog(lh);
 }
 
-copy = malloc(sizeofLogEntry(stasis_log_file, le3));
+copy = (LogEntry *)malloc(sizeofLogEntry(stasis_log_file, le3));
 memcpy(copy, le3, sizeofLogEntry(stasis_log_file, le3));
 le3 = copy;
 
@@ -324,7 +324,7 @@ static void* worker_thread(void * arg) {
 lsns[i] = 0;
 }
 i = 0;
-stasis_log_t * stasis_log_file = stasis_log();
+stasis_log_t * stasis_log_file = (stasis_log_t *)stasis_log();
 
 while(i < ENTRIES_PER_THREAD) {
 int threshold;
@@ -87,7 +87,7 @@ START_TEST(operation_physical_do_undo) {
 
 
 // XXX fails; set log format has changed
-setToTwo = allocUpdateLogEntry(stasis_log(), -1, xid, OPERATION_SET, rid.page,
+setToTwo = allocUpdateLogEntry((stasis_log_t*)stasis_log(), -1, xid, OPERATION_SET, rid.page,
 sizeof(slotid_t) + sizeof(int64_t) + 2 * sizeof(int));
 lsn_t setToTwo_lsn = setToTwo->LSN;
 
@@ -175,7 +175,7 @@ START_TEST(operation_physical_do_undo) {
 */
 
 // XXX This is a hack to put some stuff in the log. Otherwise, Tdeinit() fails.
-stasis_log_t * log = stasis_log();
+stasis_log_t * log = (stasis_log_t *)stasis_log();
 
 setToTwo->LSN = setToTwo_lsn; // XXX hack...
 
@@ -664,8 +664,8 @@ START_TEST(operation_reorderable) {
 
 stasis_log_reordering_handle_t * rh
 = stasis_log_reordering_handle_open(
-stasis_transaction_table_get(stasis_runtime_transaction_table(), xid[0]),
-stasis_log(),
+stasis_transaction_table_get((stasis_transaction_table_t*)stasis_runtime_transaction_table(), xid[0]),
+(stasis_log_t*)stasis_log(),
 100, // bytes (far too low!)
 10, // log entries
 500 // max byte size
@@ -768,7 +768,7 @@ typedef struct op_test_arg {
 } op_test_arg;
 
 static int op_test_redo_impl(const LogEntry * e, Page * p) {
-const op_test_arg * a = stasis_log_entry_update_args_cptr(e);
+const op_test_arg * a = (const op_test_arg*) stasis_log_entry_update_args_cptr(e);
 for(int i = 0; i < a->count; i++) {
 Page * p = loadPage(e->xid, a->start + i);
 if(stasis_operation_multi_should_apply(e, p)) {
@@ -784,7 +784,7 @@ static int op_test_redo_impl(const LogEntry * e, Page * p) {
 return 0;
 }
 static int op_test_undo_impl(const LogEntry * e, Page * p) {
-const op_test_arg * a = stasis_log_entry_update_args_cptr(e);
+const op_test_arg * a = (const op_test_arg*) stasis_log_entry_update_args_cptr(e);
 for(int i = 0; i < a->count; i++) {
 Page * p = loadPage(e->xid, a->start + i);
 if(stasis_operation_multi_should_apply(e, p)) {
@@ -162,7 +162,7 @@ typedef struct {
 } latchFree_worker_thread_args;
 
 static void* latchFree_worker_thread(void * arg_ptr) {
-latchFree_worker_thread_args * arg = arg_ptr;
+latchFree_worker_thread_args * arg = (latchFree_worker_thread_args *)arg_ptr;
 
 int alloced_count = 0;
 while(1) {
@@ -392,19 +392,19 @@ START_TEST (rangeTracker_randomTest) {
 // printf("unpin %s\n", s);
 free(s);
 range ** r_arry = rangeTrackerRemove(rt, &ranges[i]);
-for(int i = 0; r_arry[i]; i++) {
-check_overlap(r_arry[i], explicit_pins);
+for(int j = 0; r_arry[j]; j++) {
+check_overlap(r_arry[j], explicit_pins);
 }
 for(int j = ranges[i].start; j < ranges[i].stop; j++) {
 explicit_pins[j]--;
 assert(explicit_pins[j] >= 0);
 }
-for(int i = 0; r_arry[i]; i++) {
-s = rangeToString(r_arry[i]);
+for(int j = 0; r_arry[j]; j++) {
+s = rangeToString(r_arry[j]);
 // printf(" del returned %s\n", s);
-check_no_overlap(r_arry[i], explicit_pins);
+check_no_overlap(r_arry[j], explicit_pins);
 free(s);
-free(r_arry[i]);
+free(r_arry[j]);
 }
 free(r_arry);
 pins[i]--;
@@ -34,8 +34,8 @@ typedef struct {
 } tup;
 
 static int cmp_1(const void *ap, const void *bp, const void *ign) {
-const tup * a = ap;
-const tup * b = bp;
+const tup * a = (const tup *) ap;
+const tup * b = (const tup *) bp;
 return a->a < b->a ? -1
 : ( a->a > b->a ? 1
 : ( a->b < b->b ? -1
@@ -43,8 +43,8 @@ static int cmp_1(const void *ap, const void *bp, const void *ign) {
 : 0 )));
 }
 static int cmp_2(const void *ap, const void *bp, const void *ign) {
-const tup * a = ap;
-const tup * b = bp;
+const tup * a = (const tup *) ap;
+const tup * b = (const tup *) bp;
 return a->b < b->b ? -1
 : ( a->b > b->b ? 1
 : ( a->a < b->a ? -1
@@ -167,7 +167,7 @@ START_TEST(replacementPolicyThreadsafeRandomTest) {
 } END_TEST
 START_TEST(replacementPolicyConcurrentRandomTest) {
 int LRU_COUNT = OBJECT_COUNT / 51;
-replacementPolicy * lru[LRU_COUNT];
+replacementPolicy** lru = stasis_alloca(LRU_COUNT, replacementPolicy*);
 for(int i = 0; i < LRU_COUNT; i++) {
 lru[i] = lruFastInit();
 }
@@ -212,7 +212,7 @@ START_TEST(replacementPolicyThreadsafeThreadTest) {
 } END_TEST
 START_TEST(replacementPolicyConcurrentThreadTest) {
 int LRU_COUNT = OBJECT_COUNT / 51;
-replacementPolicy * lru[LRU_COUNT];
+replacementPolicy ** lru = stasis_alloca(LRU_COUNT,replacementPolicy*);
 for(int i = 0; i < LRU_COUNT; i++) {
 lru[i] = lruFastInit();
 }
@@ -220,7 +220,7 @@ START_TEST(replacementPolicyConcurrentThreadTest) {
 threaded = 1;
 worker_lru = cwLru;
 worker_count = LONG_COUNT / THREAD_COUNT;
-pthread_t threads[THREAD_COUNT];
+pthread_t *threads = stasis_alloca(THREAD_COUNT, pthread_t);
 randomSetup();
 for(int i = 0; i < THREAD_COUNT; i++) {
 pthread_create(&threads[i], 0, randomTestWorker, 0);