half-hearted attempt to fix -Wextra warnings...
parent a3678aad5c
commit 59bd80a2a8
10 changed files with 28 additions and 23 deletions
@@ -80,16 +80,15 @@ terms specified in this license.
 #define PAGE_SIZE 4096
 
-#define MAX_BUFFER_SIZE 100003
 //#define MAX_BUFFER_SIZE 100003
 /*#define MAX_BUFFER_SIZE 20029 */
 //#define MAX_BUFFER_SIZE 10007
 /*#define MAX_BUFFER_SIZE 5003*/
-/*#define MAX_BUFFER_SIZE 2003 */
 //#define MAX_BUFFER_SIZE 5003
+#define MAX_BUFFER_SIZE 2003
 /* #define MAX_BUFFER_SIZE 71 */
 /*#define MAX_BUFFER_SIZE 7 */
 /*#define BUFFER_ASOOCIATIVE 2 */
 
-#define MAX_TRANSACTIONS 100000
+#define MAX_TRANSACTIONS 1000
 
 /** Operation types */
@@ -30,7 +30,7 @@ Operation getRealloc();
 */
 compensated_function recordid Talloc(int xid, long size);
 
-compensated_function recordid TallocFromPage(int xid, long page, long size);
+compensated_function recordid TallocFromPage(int xid, long page, unsigned long size);
 
 /**
    Free a record.
@@ -256,12 +256,12 @@ BEGIN_C_DECLS
 /**
  * represents how to look up a record on a page
  * @todo size should be 64bit. Unfortunately, 'long' is 32 bit on ia32...
- * @todo signed long long is a stopgap fix.. should do this in a prinicpled way.
+ * @todo int64_t is a stopgap fix.. should do this in a prinicpled way.
  */
 typedef struct {
   int page;
   int slot;
-  signed long long size;
+  int64_t size; //signed long long size;
 } recordid;
 
 typedef struct {
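The @todo lines above spell out the motivation for this part of the commit: 'long' is only 32 bits on ia32, so the record size field is switched to int64_t as a stopgap. As a minimal illustration (not taken from this commit), int64_t from <stdint.h> is 64 bits on both 32-bit and 64-bit x86, and <inttypes.h> supplies the matching printf format:

/* Illustration only -- not part of this commit. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t size = 5000000000LL;   /* would overflow a 32-bit 'long' on ia32 */
  printf("sizeof(long) = %zu, sizeof(int64_t) = %zu\n",
         sizeof(long), sizeof(int64_t));
  printf("size = %" PRId64 "\n", size);
  return 0;
}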
@@ -21,6 +21,7 @@ void iterator_init() {
     linearHashNTAIterator_next,
     linearHashNTAIterator_key,
     linearHashNTAIterator_value,
+    noopTupDone,
     noopTupDone
   };
   lladdIterator_register(LINEAR_HASH_NTA_ITERATOR, linearHashNTA_def);

@@ -30,6 +31,7 @@ void iterator_init() {
     arrayIterator_next,
     arrayIterator_key,
     arrayIterator_value,
+    noopTupDone,
     noopTupDone
   };
   lladdIterator_register(ARRAY_ITERATOR, array_def);
@@ -127,7 +127,7 @@ static struct LH_ENTRY(pair_t)* insertIntoLinkedList(struct LH_ENTRY(table) * t
   return thePair;
 }
 static void extendHashTable(struct LH_ENTRY(table) * table) {
-  int maxExtension = twoToThe(table->bucketListBits-1);
+  unsigned int maxExtension = twoToThe(table->bucketListBits-1);
   // If table->bucketListNextExtension == maxExtension, then newBucket =
   // twoToThe(table->bucketListBits), which is one higher than the hash can
   // return.

@@ -140,8 +140,8 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {
     maxExtension = twoToThe(table->bucketListBits-1);
   }
 
-  int splitBucket = table->bucketListNextExtension - 1;
-  int newBucket = table->bucketListNextExtension - 1 + maxExtension;
+  unsigned int splitBucket = table->bucketListNextExtension - 1;
+  unsigned int newBucket = table->bucketListNextExtension - 1 + maxExtension;
 
   // Assumes realloc is reasonably fast... This seems to be a good
   // assumption under linux.
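Most hunks in this commit follow the pattern above: an index or size that is compared against an unsigned quantity is itself made unsigned, which is the mixed signed/unsigned comparison that -Wextra reports. A minimal sketch of the warning and the fix, using made-up names rather than the LLADD identifiers:

/* Sketch only -- not from this commit.  Compile with: gcc -Wall -Wextra -c example.c */
#include <stdio.h>

int main(void) {
  unsigned int nBuckets = 8;
  int i;
  unsigned int j;

  for (i = 0; i < nBuckets; i++)   /* warning: comparison between signed and unsigned */
    printf("%d\n", i);

  for (j = 0; j < nBuckets; j++)   /* fixed: both operands are unsigned */
    printf("%u\n", j);

  return 0;
}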
@@ -193,7 +193,7 @@ compensated_function recordid Talloc(int xid, long size) {
 
 }
 
-compensated_function recordid TallocFromPage(int xid, long page, long size) {
+compensated_function recordid TallocFromPage(int xid, long page, unsigned long size) {
   recordid rid;
 
   if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
@@ -60,7 +60,7 @@ void unlockBucket(int bucket) {
   pthread_cond_broadcast(&bucketUnlocked);
 }
 
-void rehash(int xid, recordid hash, int next_split, int i, int keySize, int valSize);
+void rehash(int xid, recordid hash, unsigned int next_split, unsigned int i, unsigned int keySize, unsigned int valSize);
 void update_hash_header(int xid, recordid hash, int i, int next_split);
 int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry * bucket_contents,
                      void * key, int keySize, int valSize, recordid * deletedEntry);

@@ -142,7 +142,7 @@ void update_hash_header(int xid, recordid hash, int i, int next_split) {
   } end;
 }
 
-void rehash(int xid, recordid hashRid, int next_split, int i, int keySize, int valSize) {
+void rehash(int xid, recordid hashRid, unsigned int next_split, unsigned int i, unsigned int keySize, unsigned int valSize) {
   try {
     int firstA = 1; // Is 'A' the recordid of a bucket?
     int firstD = 1; // What about 'D'?
@@ -113,11 +113,11 @@ void slottedCompact(Page * page) {
 
 
 /*static pthread_mutex_t lastFreepage_mutex; */
-static unsigned int lastFreepage = -10;
+static uint64_t lastFreepage = -10;
 
 void slottedPageInit() {
   /*pthread_mutex_init(&lastFreepage_mutex , NULL); */
-  lastFreepage = -1;
+  lastFreepage = UINT64_MAX;
 }
 
 void slottedPageDeInit() {
@@ -192,7 +192,7 @@ size_t slottedFreespace(Page * page) {
 
    @todo need to obtain (transaction-level) write locks _before_ writing log entries. Otherwise, we can deadlock at recovery.
 */
-compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
+compensated_function recordid slottedPreRalloc(int xid, unsigned long size, Page ** pp) {
   recordid ret;
   int isBlob = 0;
   if(size == BLOB_SLOT) {
@@ -204,7 +204,7 @@ compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {
 
   /** @todo is ((unsigned int) foo) == -1 portable? Gotta love C.*/
 
-  if(lastFreepage == -1) {
+  if(lastFreepage == UINT64_MAX) {
     try_ret(NULLRID) {
       lastFreepage = TpageAlloc(xid);
     } end_ret(NULLRID);
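The @todo above asks whether comparing an unsigned value against -1 is portable. It is well defined in C: a negative constant assigned to or compared with an unsigned type wraps to that type's maximum value, so lastFreepage = -1 and lastFreepage = UINT64_MAX store the same value; spelling it UINT64_MAX just keeps both sides of the comparison unsigned. A small sketch, not taken from the patch:

/* Sketch only -- not from this commit. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t lastFreepage = -1;                  /* -1 converts to 2^64 - 1 */
  printf("%d\n", lastFreepage == UINT64_MAX);  /* prints 1 */
  return 0;
}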
@@ -96,7 +96,7 @@ void slottedPageInitialize(Page * p);
  * @see postRallocSlot the implementation of the second phase.
  *
  */
-compensated_function recordid slottedPreRalloc(int xid, long size, Page**p);
+compensated_function recordid slottedPreRalloc(int xid, unsigned long size, Page**p);
 
 /**
  * The second phase of slot allocation. Called after the log entry
@@ -105,9 +105,9 @@ START_TEST(lhtableTest)
 long myrandom(long x) {
   double xx = x;
   double r = random();
-  double max = RAND_MAX;
-
-  return (long)(xx * (r/max));
+  double max = ((long)RAND_MAX)+1;
+  max /= xx;
+  return (long)((r/max));
 }
 
 //#define myrandom(x)(
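Besides the type cleanups, the rewritten myrandom() above fixes a range bug: when random() returns exactly RAND_MAX, the old expression x * (r/max) evaluates to x itself, one past the largest value callers expect. Dividing by RAND_MAX + 1 keeps the result in [0, x). A worked illustration, assuming x is 10000 (not part of the test file):

/* Illustration only -- not part of the test file. */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  long x = 10000;
  double r = RAND_MAX;                       /* worst-case draw from random() */

  double old_max = RAND_MAX;
  long old_result = (long)((double)x * (r / old_max));   /* == x, out of range */

  double new_max = (double)RAND_MAX + 1;     /* avoids 32-bit overflow in this sketch */
  new_max /= (double)x;
  long new_result = (long)(r / new_max);                  /* == x - 1, in range */

  printf("old: %ld  new: %ld\n", old_result, new_result);
  return 0;
}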
@@ -126,7 +126,11 @@ START_TEST(lhtableRandomized) {
   for(int jjj = 0; jjj < NUM_ITERS; jjj++) {
     time_t seed = time(0);
     printf("\nSeed = %ld\n", seed);
+    if(jjj) {
       srandom(seed);
+    } else {
+      srandom(1150241705); // This seed gets the random number generator to hit RAND_MAX, which makes a good test for myrandom()
+    }
 
     struct LH_ENTRY(table) * t = LH_ENTRY(create)(myrandom(10000));
     int numSets = myrandom(MAXSETS);