half-hearted attempt to fix -Wextra warnings...
This commit is contained in:
parent
a3678aad5c
commit
59bd80a2a8
10 changed files with 28 additions and 23 deletions
|
@ -80,16 +80,15 @@ terms specified in this license.
|
||||||
|
|
||||||
#define PAGE_SIZE 4096
|
#define PAGE_SIZE 4096
|
||||||
|
|
||||||
#define MAX_BUFFER_SIZE 100003
|
//#define MAX_BUFFER_SIZE 100003
|
||||||
/*#define MAX_BUFFER_SIZE 20029 */
|
/*#define MAX_BUFFER_SIZE 20029 */
|
||||||
//#define MAX_BUFFER_SIZE 10007
|
//#define MAX_BUFFER_SIZE 10007
|
||||||
/*#define MAX_BUFFER_SIZE 5003*/
|
//#define MAX_BUFFER_SIZE 5003
|
||||||
/*#define MAX_BUFFER_SIZE 2003 */
|
#define MAX_BUFFER_SIZE 2003
|
||||||
/* #define MAX_BUFFER_SIZE 71 */
|
/* #define MAX_BUFFER_SIZE 71 */
|
||||||
/*#define MAX_BUFFER_SIZE 7 */
|
/*#define MAX_BUFFER_SIZE 7 */
|
||||||
/*#define BUFFER_ASSOCIATIVE 2 */
|
|
||||||
|
|
||||||
#define MAX_TRANSACTIONS 100000
|
#define MAX_TRANSACTIONS 1000
|
||||||
|
|
||||||
/** Operation types */
|
/** Operation types */
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@ Operation getRealloc();
|
||||||
*/
|
*/
|
||||||
compensated_function recordid Talloc(int xid, long size);
|
compensated_function recordid Talloc(int xid, long size);
|
||||||
|
|
||||||
compensated_function recordid TallocFromPage(int xid, long page, long size);
|
compensated_function recordid TallocFromPage(int xid, long page, unsigned long size);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Free a record.
|
Free a record.
|
||||||
|
|
|
@ -256,12 +256,12 @@ BEGIN_C_DECLS
|
||||||
/**
|
/**
|
||||||
* represents how to look up a record on a page
|
* represents how to look up a record on a page
|
||||||
* @todo size should be 64bit. Unfortunately, 'long' is 32 bit on ia32...
|
* @todo size should be 64bit. Unfortunately, 'long' is 32 bit on ia32...
|
||||||
* @todo signed long long is a stopgap fix.. should do this in a principled way.
|
* @todo int64_t is a stopgap fix.. should do this in a principled way.
|
||||||
*/
|
*/
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int page;
|
int page;
|
||||||
int slot;
|
int slot;
|
||||||
signed long long size;
|
int64_t size; //signed long long size;
|
||||||
} recordid;
|
} recordid;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
|
|
@ -21,6 +21,7 @@ void iterator_init() {
|
||||||
linearHashNTAIterator_next,
|
linearHashNTAIterator_next,
|
||||||
linearHashNTAIterator_key,
|
linearHashNTAIterator_key,
|
||||||
linearHashNTAIterator_value,
|
linearHashNTAIterator_value,
|
||||||
|
noopTupDone,
|
||||||
noopTupDone
|
noopTupDone
|
||||||
};
|
};
|
||||||
lladdIterator_register(LINEAR_HASH_NTA_ITERATOR, linearHashNTA_def);
|
lladdIterator_register(LINEAR_HASH_NTA_ITERATOR, linearHashNTA_def);
|
||||||
|
@ -30,6 +31,7 @@ void iterator_init() {
|
||||||
arrayIterator_next,
|
arrayIterator_next,
|
||||||
arrayIterator_key,
|
arrayIterator_key,
|
||||||
arrayIterator_value,
|
arrayIterator_value,
|
||||||
|
noopTupDone,
|
||||||
noopTupDone
|
noopTupDone
|
||||||
};
|
};
|
||||||
lladdIterator_register(ARRAY_ITERATOR, array_def);
|
lladdIterator_register(ARRAY_ITERATOR, array_def);
|
||||||
|
|
|
@ -127,7 +127,7 @@ static struct LH_ENTRY(pair_t)* insertIntoLinkedList(struct LH_ENTRY(table) * t
|
||||||
return thePair;
|
return thePair;
|
||||||
}
|
}
|
||||||
static void extendHashTable(struct LH_ENTRY(table) * table) {
|
static void extendHashTable(struct LH_ENTRY(table) * table) {
|
||||||
int maxExtension = twoToThe(table->bucketListBits-1);
|
unsigned int maxExtension = twoToThe(table->bucketListBits-1);
|
||||||
// If table->bucketListNextExtension == maxExtension, then newBucket =
|
// If table->bucketListNextExtension == maxExtension, then newBucket =
|
||||||
// twoToThe(table->bucketListBits), which is one higher than the hash can
|
// twoToThe(table->bucketListBits), which is one higher than the hash can
|
||||||
// return.
|
// return.
|
||||||
|
@ -140,8 +140,8 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {
|
||||||
maxExtension = twoToThe(table->bucketListBits-1);
|
maxExtension = twoToThe(table->bucketListBits-1);
|
||||||
}
|
}
|
||||||
|
|
||||||
int splitBucket = table->bucketListNextExtension - 1;
|
unsigned int splitBucket = table->bucketListNextExtension - 1;
|
||||||
int newBucket = table->bucketListNextExtension - 1 + maxExtension;
|
unsigned int newBucket = table->bucketListNextExtension - 1 + maxExtension;
|
||||||
|
|
||||||
// Assumes realloc is reasonably fast... This seems to be a good
|
// Assumes realloc is reasonably fast... This seems to be a good
|
||||||
// assumption under linux.
|
// assumption under linux.
|
||||||
|
|
|
@ -193,7 +193,7 @@ compensated_function recordid Talloc(int xid, long size) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
compensated_function recordid TallocFromPage(int xid, long page, long size) {
|
compensated_function recordid TallocFromPage(int xid, long page, unsigned long size) {
|
||||||
recordid rid;
|
recordid rid;
|
||||||
|
|
||||||
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
|
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
|
||||||
|
|
|
@ -60,7 +60,7 @@ void unlockBucket(int bucket) {
|
||||||
pthread_cond_broadcast(&bucketUnlocked);
|
pthread_cond_broadcast(&bucketUnlocked);
|
||||||
}
|
}
|
||||||
|
|
||||||
void rehash(int xid, recordid hash, int next_split, int i, int keySize, int valSize);
|
void rehash(int xid, recordid hash, unsigned int next_split, unsigned int i, unsigned int keySize, unsigned int valSize);
|
||||||
void update_hash_header(int xid, recordid hash, int i, int next_split);
|
void update_hash_header(int xid, recordid hash, int i, int next_split);
|
||||||
int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry * bucket_contents,
|
int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry * bucket_contents,
|
||||||
void * key, int keySize, int valSize, recordid * deletedEntry);
|
void * key, int keySize, int valSize, recordid * deletedEntry);
|
||||||
|
@ -142,7 +142,7 @@ void update_hash_header(int xid, recordid hash, int i, int next_split) {
|
||||||
} end;
|
} end;
|
||||||
}
|
}
|
||||||
|
|
||||||
void rehash(int xid, recordid hashRid, int next_split, int i, int keySize, int valSize) {
|
void rehash(int xid, recordid hashRid, unsigned int next_split, unsigned int i, unsigned int keySize, unsigned int valSize) {
|
||||||
try {
|
try {
|
||||||
int firstA = 1; // Is 'A' the recordid of a bucket?
|
int firstA = 1; // Is 'A' the recordid of a bucket?
|
||||||
int firstD = 1; // What about 'D'?
|
int firstD = 1; // What about 'D'?
|
||||||
|
|
|
@ -113,11 +113,11 @@ void slottedCompact(Page * page) {
|
||||||
|
|
||||||
|
|
||||||
/*static pthread_mutex_t lastFreepage_mutex; */
|
/*static pthread_mutex_t lastFreepage_mutex; */
|
||||||
static unsigned int lastFreepage = -10;
|
static uint64_t lastFreepage = -10;
|
||||||
|
|
||||||
void slottedPageInit() {
|
void slottedPageInit() {
|
||||||
/*pthread_mutex_init(&lastFreepage_mutex , NULL); */
|
/*pthread_mutex_init(&lastFreepage_mutex , NULL); */
|
||||||
lastFreepage = -1;
|
lastFreepage = UINT64_MAX;
|
||||||
}
|
}
|
||||||
|
|
||||||
void slottedPageDeInit() {
|
void slottedPageDeInit() {
|
||||||
|
@ -192,7 +192,7 @@ size_t slottedFreespace(Page * page) {
|
||||||
|
|
||||||
@todo need to obtain (transaction-level) write locks _before_ writing log entries. Otherwise, we can deadlock at recovery.
|
@todo need to obtain (transaction-level) write locks _before_ writing log entries. Otherwise, we can deadlock at recovery.
|
||||||
*/
|
*/
|
||||||
compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
|
compensated_function recordid slottedPreRalloc(int xid, unsigned long size, Page ** pp) {
|
||||||
recordid ret;
|
recordid ret;
|
||||||
int isBlob = 0;
|
int isBlob = 0;
|
||||||
if(size == BLOB_SLOT) {
|
if(size == BLOB_SLOT) {
|
||||||
|
@ -204,7 +204,7 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
|
||||||
|
|
||||||
/** @todo is ((unsigned int) foo) == -1 portable? Gotta love C.*/
|
/** @todo is ((unsigned int) foo) == -1 portable? Gotta love C.*/
|
||||||
|
|
||||||
if(lastFreepage == -1) {
|
if(lastFreepage == UINT64_MAX) {
|
||||||
try_ret(NULLRID) {
|
try_ret(NULLRID) {
|
||||||
lastFreepage = TpageAlloc(xid);
|
lastFreepage = TpageAlloc(xid);
|
||||||
} end_ret(NULLRID);
|
} end_ret(NULLRID);
|
||||||
|
|
|
@ -96,7 +96,7 @@ void slottedPageInitialize(Page * p);
|
||||||
* @see postRallocSlot the implementation of the second phase.
|
* @see postRallocSlot the implementation of the second phase.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
compensated_function recordid slottedPreRalloc(int xid, long size, Page**p);
|
compensated_function recordid slottedPreRalloc(int xid, unsigned long size, Page**p);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The second phase of slot allocation. Called after the log entry
|
* The second phase of slot allocation. Called after the log entry
|
||||||
|
|
|
@ -105,9 +105,9 @@ START_TEST(lhtableTest)
|
||||||
long myrandom(long x) {
|
long myrandom(long x) {
|
||||||
double xx = x;
|
double xx = x;
|
||||||
double r = random();
|
double r = random();
|
||||||
double max = RAND_MAX;
|
double max = ((long)RAND_MAX)+1;
|
||||||
|
max /= xx;
|
||||||
return (long)(xx * (r/max));
|
return (long)((r/max));
|
||||||
}
|
}
|
||||||
|
|
||||||
//#define myrandom(x)(
|
//#define myrandom(x)(
|
||||||
|
@ -126,7 +126,11 @@ START_TEST(lhtableRandomized) {
|
||||||
for(int jjj = 0; jjj < NUM_ITERS; jjj++) {
|
for(int jjj = 0; jjj < NUM_ITERS; jjj++) {
|
||||||
time_t seed = time(0);
|
time_t seed = time(0);
|
||||||
printf("\nSeed = %ld\n", seed);
|
printf("\nSeed = %ld\n", seed);
|
||||||
srandom(seed);
|
if(jjj) {
|
||||||
|
srandom(seed);
|
||||||
|
} else {
|
||||||
|
srandom(1150241705); // This seed gets the random number generator to hit RAND_MAX, which makes a good test for myrandom()
|
||||||
|
}
|
||||||
|
|
||||||
struct LH_ENTRY(table) * t = LH_ENTRY(create)(myrandom(10000));
|
struct LH_ENTRY(table) * t = LH_ENTRY(create)(myrandom(10000));
|
||||||
int numSets = myrandom(MAXSETS);
|
int numSets = myrandom(MAXSETS);
|
||||||
|
|
Loading…
Reference in a new issue