Speedups, most notably in the logging subsystem.
This commit is contained in:
parent c850ce3a51
commit 6b265f28b0

16 changed files with 190 additions and 89 deletions
@@ -7,31 +7,40 @@
int main(int argc, char** argv) {
assert(argc == 2);
assert(argc == 3);
int count = atoi(argv[1]);
int xact_count = atoi(argv[1]);
int count = atoi(argv[2]);
int k;
unlink("storefile.txt");
unlink("logfile.txt");
unlink("blob0_file.txt");
unlink("blob1_file.txt");
Tinit();
int xid = Tbegin();
int xid = Tbegin();
recordid hash = ThashAlloc(xid, sizeof(int), sizeof(int));
recordid hash = ThashAlloc(xid, sizeof(int), sizeof(int));
Tcommit(xid);
int i;
int i;
for(i = 0; i < count ; i++) {
for(k = 0; k < xact_count; k++) {
TlogicalHashInsert(xid, hash, &i, sizeof(int), &i, sizeof(int));
xid = Tbegin();
for(i = 0; i < count ; i++) {
TlogicalHashInsert(xid, hash, &i, sizeof(int), &i, sizeof(int));
}
Tcommit(xid);
}
Tcommit(xid);
Tdeinit();
/* Tdeinit(); */
}

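In short, the benchmark now takes two arguments (a transaction count followed by the number of inserts per transaction) and commits each batch in its own transaction, instead of performing every insert inside a single Tbegin()/Tcommit() pair.
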
@@ -59,6 +59,8 @@ terms specified in this license.
* $Id$
*/
//#define NDEBUG
#ifndef __lladd_common_h
#define __lladd_common_h

@@ -100,7 +102,8 @@ extern int errno;
/*#define DEBUGGING */
/* #define PROFILE_LATCHES */
/*#define PROFILE_LATCHES*/
#define NO_LATCHES
#ifdef DEBUGGING
/** @todo Files that use DEBUG have to pull in stdio.h, which is a pain! */

@@ -76,9 +76,9 @@ terms specified in this license.
#define PAGE_SIZE 4096
/* #define MAX_BUFFER_SIZE 100003 */
/*#define MAX_BUFFER_SIZE 10007*/
#define MAX_BUFFER_SIZE 5003
/*#define MAX_BUFFER_SIZE 2003 */
/*#define MAX_BUFFER_SIZE 10007 */
/*#define MAX_BUFFER_SIZE 5003*/
#define MAX_BUFFER_SIZE 2003
/* #define MAX_BUFFER_SIZE 71 */
/*#define MAX_BUFFER_SIZE 7 */
/*#define BUFFER_ASOOCIATIVE 2 */

@@ -61,6 +61,8 @@ terms specified in this license.
static pblHashTable_t *activePages; /* page lookup */
/*static Page * activePagePtrs[MAX_BUFFER_SIZE];*/
static pthread_mutex_t loadPagePtr_mutex;

@@ -74,15 +76,15 @@ int bufInit() {
pthread_mutex_init(&loadPagePtr_mutex, NULL);
activePages = pblHtCreate();
activePages = pblHtCreate();
dummy_page = pageMalloc();
pageRealloc(dummy_page, -1);
Page *first;
first = pageMalloc();
pageRealloc(first, 0);
pblHtInsert(activePages, &first->id, sizeof(int), first);
pblHtInsert(activePages, &first->id, sizeof(int), first);
openBlobStore();
pageCacheInit(first);

@@ -98,8 +100,8 @@ void bufDeinit() {
Page *p;
DEBUG("pageCacheDeinit()");
for( p = (Page*)pblHtFirst( activePages ); p; p = (Page*)pblHtNext(activePages)) {
for( p = (Page*)pblHtFirst( activePages ); p; p = (Page*)pblHtNext(activePages)) {
pblHtRemove( activePages, 0, 0 );
DEBUG("+");

@@ -109,8 +111,8 @@ void bufDeinit() {
abort();
/ * exit(ret); * /
}*/
pageWrite(p);
}
pthread_mutex_destroy(&loadPagePtr_mutex);

@@ -271,7 +273,7 @@ Page * getPage(int pageid, int locktype) {
}
assert(ret->id == pageid);
/* assert(ret->id == pageid); */
return ret;

@@ -91,4 +91,29 @@ void __profile_deletelock (rwl *lock);
#endif
#ifdef NO_LATCHES
/* #define pthread_mutex_init(x, y) */
/* #define pthread_mutex_destroy(x) */
#define pthread_mutex_lock(x) 1
#define pthread_mutex_unlock(x) 1
#define pthread_mutex_trylock(x) 1
#define pthread_cond_wait(x, y) 1
#define pthread_cond_timedwait(x, y, z) 1
/* #define initlock() */
#define readlock(x, y) 1
#define writelock(x, y) 1
#define readunlock(x) 1
#define writeunlock(x) 1
#define unlock(x) 1
#define downgradelock(x) 1
/* #define deletelock(x) */
#endif
#endif /* __LATCHES_H */

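With NO_LATCHES defined, every latch and mutex operation in the header above expands to the constant 1, so single-threaded benchmark runs pay nothing for locking. A minimal standalone sketch of the same trick follows; the names (latch_lock, latch_unlock) are illustrative, not the project's:

/* sketch: compile latch calls away behind a build flag.
   When NO_LATCHES is set, each call expands to the constant 1,
   so the optimizer removes all locking from single-threaded runs. */
#include <pthread.h>
#include <stdio.h>

#define NO_LATCHES 1

#ifdef NO_LATCHES
#  define latch_lock(x)   1   /* evaluates to 1; no call is emitted */
#  define latch_unlock(x) 1
#else
#  define latch_lock(x)   pthread_mutex_lock(x)
#  define latch_unlock(x) pthread_mutex_unlock(x)
#endif

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static long counter;

int main(void) {
  for(int i = 0; i < 1000000; i++) {
    latch_lock(&m);        /* no-op under NO_LATCHES */
    counter++;
    latch_unlock(&m);
  }
  printf("%ld\n", counter);
  return 0;
}

The obvious caveat is that a binary built this way is no longer thread-safe; the flag only makes sense for single-threaded measurement.
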
@@ -124,8 +124,10 @@ pthread_mutex_t log_write_mutex;
pthread_mutex_t truncateLog_mutex;
static int sought = 1;
int openLogWriter() {
#define BUFSIZE 1024*16
char * buffer = malloc(BUFSIZE);
log = fopen(LOG_FILE, "a+");
if (log==NULL) {

@@ -133,6 +135,9 @@ int openLogWriter() {
/*there was an error opening this file */
return FILE_WRITE_OPEN_ERROR;
}
setbuffer(log, buffer, BUFSIZE);
/* Initialize locks. */

@@ -179,7 +184,7 @@ int openLogWriter() {
count = fread(&global_offset, sizeof(lsn_t), 1, log);
assert(count == 1);
}
sought =1;
return 0;
}

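The new setbuffer() call gives the log's FILE a 16KB buffer, so consecutive small appends are coalesced into roughly one write() system call per 16KB instead of one per record. A hedged sketch of the same pattern (the file name and constants here are invented); setvbuf() is used because it is the portable equivalent of the BSD setbuffer():

/* Illustrative sketch of buffering an append-only log with stdio. */
#include <stdio.h>
#include <stdlib.h>

#define LOG_PATH "example_log.bin"
#define BUFSZ (1024 * 16)

int main(void) {
  char *buf = malloc(BUFSZ);
  FILE *log = fopen(LOG_PATH, "a+");
  if(!log) { perror("fopen"); return 1; }

  /* Full buffering with a caller-supplied 16KB buffer, so small
     fwrite()s batch into one syscall; must be called before any I/O. */
  setvbuf(log, buf, _IOFBF, BUFSZ);

  for(int i = 0; i < 10000; i++) {
    fwrite(&i, sizeof(i), 1, log);   /* buffered; hits the kernel ~every 16KB */
  }
  fflush(log);   /* force the tail out, e.g. at commit time */
  fclose(log);
  free(buf);
  return 0;
}
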
@@ -203,7 +208,7 @@ int openLogWriter() {
*/
int flushLog();
static int flushLog();
int writeLogEntry(LogEntry * e) {

@@ -246,6 +251,7 @@ int writeLogEntry(LogEntry * e) {
#ifdef DEBUGGING
e->LSN = myFseek(log, 0, SEEK_END) + global_offset;
sought = 1;
if(nextAvailableLSN != e->LSN) {
assert(nextAvailableLSN <= e->LSN);
DEBUG("Detected log truncation: nextAvailableLSN = %ld, but log length is %ld.\n", (long)nextAvailableLSN, e->LSN);

@@ -260,8 +266,6 @@ int writeLogEntry(LogEntry * e) {
nextAvailableLSN += (size + sizeof(long));
int oldBufferedSize = bufferedSize;
bufferedSize += (size + sizeof(long));
fseek(log, writtenLSN_val /*nextAvailableLSN*/ - global_offset, SEEK_SET);
logBuffer = realloc(logBuffer, size + sizeof(long));
if(! logBuffer) {

@@ -287,6 +291,12 @@ int writeLogEntry(LogEntry * e) {
such heavy use of global variables...) */
static int flushLog() {
if (!logBuffer) { return 0;}
if(sought) {
fseek(log, writtenLSN_val /*nextAvailableLSN*/ - global_offset, SEEK_SET);
sought = 0;
}
int nmemb = fwrite(logBuffer, bufferedSize, 1, log);
writtenLSN_val += bufferedSize;
bufferedSize = 0;

@@ -302,9 +312,12 @@ static int flushLog() {
void syncLog() {
lsn_t newFlushedLSN;
newFlushedLSN = myFseek(log, 0, SEEK_END);
if(sought) {
newFlushedLSN = myFseek(log, 0, SEEK_END);
sought = 1;
} else {
newFlushedLSN = ftell(log);
}
/* Wait to set the static variable until after the flush returns. */
fflush(log);

@@ -425,6 +438,7 @@ LogEntry * readLSNEntry(lsn_t LSN) {
flockfile(log);
fseek(log, LSN - global_offset, SEEK_SET);
sought = 1;
ret = readLogEntry();
funlockfile(log);

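The sought flag threaded through the hunks above records whether the log's file position is known to be stale: readers and the debugging path set it after they reposition the stream, and flushLog()/syncLog() only pay for an fseek() (or an extra SEEK_END) when it is set, using ftell() otherwise. A self-contained sketch of that bookkeeping, with invented names; only the "dirty position" idea follows the diff:

/* Sketch: avoid redundant fseek() calls on an append-mostly log. */
#include <stdio.h>

static FILE *log_file;
static long  write_pos;     /* where the next append belongs */
static int   sought = 1;    /* 1 => the OS file position may be stale */

static void append_bytes(const void *buf, size_t len) {
  if(sought) {                       /* reposition only when needed */
    fseek(log_file, write_pos, SEEK_SET);
    sought = 0;
  }
  fwrite(buf, len, 1, log_file);
  write_pos += len;
}

static void read_at(long off, void *buf, size_t len) {
  fseek(log_file, off, SEEK_SET);    /* readers move the position... */
  fread(buf, len, 1, log_file);
  sought = 1;                        /* ...so the next append must re-seek */
}

int main(void) {
  log_file = fopen("example_log.bin", "w+");
  if(!log_file) return 1;
  int x = 42, y = 0;
  append_bytes(&x, sizeof x);
  read_at(0, &y, sizeof y);
  append_bytes(&x, sizeof x);        /* re-seeks exactly once here */
  fclose(log_file);
  return 0;
}
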
@@ -75,6 +75,7 @@ static lsn_t LogTransCommon(TransactionLog * l, int type) {
}
lsn_t LogTransCommit(TransactionLog * l) {
syncLog();
return LogTransCommon(l, XCOMMIT);
}

@@ -152,7 +152,7 @@ int TarrayListExtend(int xid, recordid rid, int slots) {
could handle uninitialized blocks correctly, then we
wouldn't have to iterate over the datapages in
TarrayListExtend() */
Tupdate(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
// Tupdate(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
}
tmp.slot = i + FIRST_DATA_PAGE_OFFSET;

@@ -205,15 +205,15 @@ int TarrayListInstantExtend(int xid, recordid rid, int slots) {
int newFirstPage = TpageAllocMany(xid, blockSize);
DEBUG("block %d\n", i);
/* Iterate over the storage blocks that are pointed to by our current indirection block. */
for(int j = 0; j < blockSize; j++) {
/* for(int j = 0; j < blockSize; j++) {
DEBUG("page %d (%d)\n", j, j + newFirstPage);
tmp2.page = j + newFirstPage;
/** @todo If we were a little smarter about this, and fixed.c
/ ** @todo If we were a little smarter about this, and fixed.c
coulds handle uninitialized blocks correctly, then we
wouldn't have to iterate over the datapages in
TarrayListExtend() */
Tupdate(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
}
TarrayListExtend() * /
// Tupdate(xid, tmp2, NULL, OPERATION_INITIALIZE_FIXED_PAGE);
} */
tmp.slot = i + FIRST_DATA_PAGE_OFFSET;
/** @todo what does this do to recovery?? */

@@ -188,15 +188,24 @@ int findInBucket(int xid, recordid hashRid, int bucket_number, const void * key,
return found;
}*/
int extendCount = 0;
void instant_expand (int xid, recordid hash, int next_split, int i, int keySize, int valSize) {
TarrayListInstantExtend(xid, hash, 1);
if(next_split >= twoToThe(i-1)+2) {
i++;
next_split = 2;
extendCount ++;
if(extendCount >= 70) {
TarrayListInstantExtend(xid, hash, 100);
int j;
for(j = 0; j < 100; j++) {
if(next_split >= twoToThe(i-1)+2) {
i++;
next_split = 2;
}
instant_rehash(xid, hash, next_split, i, keySize, valSize);
next_split++;
}
instant_update_hash_header(xid, hash, i, next_split);
extendCount = 0;
}
instant_rehash(xid, hash, next_split, i, keySize, valSize);
next_split++;
instant_update_hash_header(xid, hash, i, next_split);
}
void instant_update_hash_header(int xid, recordid hash, int i, int next_split) {

@@ -280,8 +280,8 @@ int deleteFromBucket(int xid, recordid hash, int bucket_number, recordid bucket_
recordid ThashAlloc(int xid, int keySize, int valSize) {
/* Want 16 buckets, doubling on overflow. */
recordid rid = TarrayListAlloc(xid, 16, 2, sizeof(recordid));
TarrayListExtend(xid, rid, 32+2);
recordid rid = TarrayListAlloc(xid, 4096, 2, sizeof(recordid));
TarrayListExtend(xid, rid, 4096+2);
recordid headerRidA;
recordid * headerRidB = malloc (sizeof(recordid));

@@ -290,7 +290,7 @@ recordid ThashAlloc(int xid, int keySize, int valSize) {
headerValSize = valSize;
headerNextSplit = INT_MAX;
headerHashBits = 4;
headerHashBits = 12;
rid.slot =0;
Tset(xid, rid, &headerRidA);

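instant_expand() now amortizes growth: it counts calls and, on every 70th, extends the bucket array by 100 slots and performs the next 100 splits in one burst, updating the hash header once per batch; ThashAlloc() likewise starts with 4096 buckets (headerHashBits = 12) instead of 16. A rough sketch of the amortization pattern, with invented names and plain in-memory state standing in for the transactional calls:

/* Sketch of amortized expansion for a linear hash table.
   grow_by()/split_one() stand in for TarrayListInstantExtend()
   and instant_rehash(); the 70/100 constants mirror the diff. */
#include <stdio.h>

#define BATCH_TRIGGER 70
#define BATCH_SIZE    100

static int capacity = 4096;   /* start big, like the new ThashAlloc() */
static int next_split = 2;
static int pending = 0;       /* expansion requests since the last batch */

static void grow_by(int n)  { capacity += n; }
static void split_one(void) { next_split++; }

/* Called once per insert that pushes the load factor over threshold. */
void expand(void) {
  pending++;
  if(pending >= BATCH_TRIGGER) {
    grow_by(BATCH_SIZE);                   /* one big extension... */
    for(int j = 0; j < BATCH_SIZE; j++) {  /* ...then a burst of splits */
      split_one();
    }
    pending = 0;                           /* header written once per batch */
  }
}

int main(void) {
  for(int i = 0; i < 1000; i++) expand();
  printf("capacity=%d next_split=%d\n", capacity, next_split);
  return 0;
}
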
@@ -251,7 +251,7 @@ void writeRecord(int xid, Page * p, lsn_t lsn, recordid rid, const void *dat) {
writeBlob(xid, p, lsn, rid, dat);
} else if(*page_type_ptr(p) == SLOTTED_PAGE) {
slottedWrite(xid, p, lsn, rid, dat);
} else if(*page_type_ptr(p) == FIXED_PAGE) {
} else if(*page_type_ptr(p) == FIXED_PAGE || !*page_type_ptr(p) ) {
fixedWrite(p, rid, dat);
} else {
abort();

@@ -273,7 +273,9 @@ void readRecord(int xid, Page * p, recordid rid, void *buf) {
readBlob(xid, p, rid, buf);
} else if(page_type == SLOTTED_PAGE) {
slottedRead(xid, p, rid, buf);
} else if(page_type == FIXED_PAGE) {
/* FIXED_PAGES can function correctly even if they have not been
initialized. */
} else if(page_type == FIXED_PAGE || !page_type) {
fixedRead(p, rid, buf);
} else {
abort();

@@ -54,12 +54,16 @@ recordid fixedRawRalloc(Page *page) {
}
static void checkRid(Page * page, recordid rid) {
assert(*page_type_ptr(page) == FIXED_PAGE || *page_type_ptr(page) == ARRAY_LIST_PAGE);
assert(page->id == rid.page);
assert(*recordsize_ptr(page) == rid.size);
/* assert(recordsPerPage(rid.size) > rid.slot); */
int recCount = *recordcount_ptr(page);
assert(recCount > rid.slot);
if(*page_type_ptr(page)) {
assert(*page_type_ptr(page) == FIXED_PAGE || *page_type_ptr(page) == ARRAY_LIST_PAGE);
assert(page->id == rid.page);
assert(*recordsize_ptr(page) == rid.size);
/* assert(recordsPerPage(rid.size) > rid.slot); */
int recCount = *recordcount_ptr(page);
assert(recCount > rid.slot);
} else {
fixedPageInitialize(page, rid.size, recordsPerPage(rid.size));
}
}
void fixedReadUnlocked(Page * page, recordid rid, byte * buf) {

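checkRid() now treats a zero page-type header as "never formatted" and initializes the fixed page on first access, which is what lets the readRecord()/writeRecord() hunks above accept pages whose type field is still 0. A small sketch of the lazy-initialization check; the struct layout and helper names are invented, only the "type == 0 means untouched, format it now" rule comes from the diff:

/* Sketch: initialize a page header lazily on first access. */
#include <assert.h>
#include <string.h>
#include <stdio.h>

#define MY_FIXED_PAGE 3   /* placeholder tag, not the project's constant */

typedef struct {
  int  type;          /* 0 until the page is first formatted */
  int  record_size;
  int  record_count;
  char data[4096];
} page_t;

static void format_fixed(page_t *p, int record_size) {
  memset(p->data, 0, sizeof(p->data));
  p->record_size  = record_size;
  p->record_count = (int)(sizeof(p->data) / record_size);
  p->type         = MY_FIXED_PAGE;
}

static void check_page(page_t *p, int record_size) {
  if(p->type) {
    assert(p->type == MY_FIXED_PAGE);      /* already formatted: just verify */
    assert(p->record_size == record_size);
  } else {
    format_fixed(p, record_size);          /* first touch: format in place */
  }
}

int main(void) {
  static page_t p;                 /* zero-initialized, like a fresh page */
  check_page(&p, sizeof(int));
  printf("records per page: %d\n", p.record_count);
  return 0;
}
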
@@ -194,7 +194,6 @@ unsigned int indirectPageRecordCount(recordid rid) {
} else {
printf("Unknown page type in indirectPageRecordCount()\n");
fflush(NULL);
abort();
}
releasePage(p);

@@ -28,28 +28,38 @@ static pthread_mutex_t stable_mutex;
/* static long myLseek(int f, long offset, int whence); */
static long myLseekNoLock(int f, long offset, int whence);
static int oldOffset = -1;
void pageRead(Page *ret) {
long pageoffset;
long offset;
/** @todo pageRead() is using fseek to calculate the file size on each read, which is inefficient. */
pageoffset = ret->id * PAGE_SIZE;
pthread_mutex_lock(&stable_mutex);
offset = myLseekNoLock(stable, pageoffset, SEEK_SET);
if(oldOffset != pageoffset) {
offset = myLseekNoLock(stable, pageoffset, SEEK_SET);
assert(offset == pageoffset);
} else {
offset = oldOffset;
}
oldOffset = offset + PAGE_SIZE;
assert(offset == pageoffset);
int read_size;
read_size = read(stable, ret->memAddr, PAGE_SIZE);
if(read_size != PAGE_SIZE) {
if (!read_size) {
long fileSize = myLseekNoLock(stable, 0, SEEK_END);
if (!read_size) { /* Past EOF... */
/* long fileSize = myLseekNoLock(stable, 0, SEEK_END);
offset = myLseekNoLock(stable, pageoffset, SEEK_SET);
assert(offset == pageoffset);
if(fileSize <= pageoffset) {
assert(offset == pageoffset); */
/* if(fileSize <= pageoffset) { */
memset(ret->memAddr, 0, PAGE_SIZE);
write(stable, ret->memAddr, PAGE_SIZE); /* all this does is extend the file.. */
}
/* write(stable, ret->memAddr, PAGE_SIZE); */ /* all this does is extend the file..why would we bother doing that? :)
} */
} else if(read_size == -1) {
perror("pageFile.c couldn't read");
fflush(NULL);

@@ -77,25 +87,32 @@ void pageWrite(Page * ret) {
pthread_mutex_lock(&stable_mutex);
offset = myLseekNoLock(stable, pageoffset, SEEK_SET);
assert(offset == pageoffset);
if(oldOffset != pageoffset) {
offset = myLseekNoLock(stable, pageoffset, SEEK_SET);
assert(offset == pageoffset);
} else {
offset = oldOffset;
}
oldOffset = offset + PAGE_SIZE;
assert(ret->memAddr);
/* DEBUG("Writing page %d\n", ret->id); */
int write_ret = write(stable, ret->memAddr, PAGE_SIZE);
if(-1 == write_ret) {
perror("pageFile.c couldn't write");
fflush(NULL);
abort();
} else if(0 == write_ret) {
/* now what? */
printf("write_ret is zero\n");
fflush(NULL);
abort();
} else if(write_ret != PAGE_SIZE){
printf("write_ret is %d\n", write_ret);
fflush(NULL);
abort();
if(write_ret != PAGE_SIZE) {
if(-1 == write_ret) {
perror("pageFile.c couldn't write");
fflush(NULL);
abort();
} else if(0 == write_ret) {
/* now what? */
printf("write_ret is zero\n");
fflush(NULL);
abort();
} else {
printf("write_ret is %d\n", write_ret);
fflush(NULL);
abort();
}
}
pthread_mutex_unlock(&stable_mutex);
}

@@ -127,7 +144,7 @@ void closePageFile() {
stable = -1;
}
long myLseek(int f, long offset, int whence) {
static long myLseek(int f, long offset, int whence) {
long ret;
pthread_mutex_lock(&stable_mutex);
ret = myLseekNoLock(f, offset, whence);

@@ -135,7 +152,7 @@ long myLseek(int f, long offset, int whence) {
return ret;
}
long myLseekNoLock(int f, long offset, int whence) {
static long myLseekNoLock(int f, long offset, int whence) {
assert(! ( offset % 4096 ));
long ret = lseek(f, offset, whence);
if(ret == -1) {

@@ -158,8 +175,13 @@ long myLseekNoLock(int f, long offset, int whence) {
}*/
long pageCount() {
pthread_mutex_lock(&stable_mutex);
printf(".");
long fileSize = myLseek(stable, 0, SEEK_END);
oldOffset = -1;
pthread_mutex_unlock(&stable_mutex);
assert(! (fileSize % PAGE_SIZE));
return fileSize / PAGE_SIZE;
}

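pageRead() and pageWrite() now cache the file offset left behind by the previous operation (oldOffset) and skip the lseek() when the next page is the one that immediately follows; pageCount() resets the cache because it seeks to the end of the file. A standalone sketch of the same optimization, with invented names (only the write path is shown):

/* Sketch: skip lseek() when page I/O is already sequential. */
#include <assert.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define PG_SIZE 4096

static int  fd;
static long cached_off = -1;   /* file position after the last page op, or -1 */

void page_write(long pageid, const char *buf) {
  long want = pageid * PG_SIZE;
  if(cached_off != want) {               /* random access: must seek */
    long got = lseek(fd, want, SEEK_SET);
    assert(got == want);
  }                                      /* sequential access: position is right */
  ssize_t n = write(fd, buf, PG_SIZE);
  assert(n == PG_SIZE);
  cached_off = want + PG_SIZE;           /* remember where we ended up */
}

int main(void) {
  fd = open("example_pages.bin", O_RDWR | O_CREAT, 0644);
  if(fd < 0) return 1;
  char *page = calloc(1, PG_SIZE);
  page_write(0, page);   /* seeks once */
  page_write(1, page);   /* sequential: no lseek() */
  page_write(5, page);   /* random: seeks again */
  free(page);
  close(fd);
  return 0;
}
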
@@ -24,6 +24,9 @@
please see: http://mission.base.com/.
$Log$
Revision 1.5 2004/10/19 04:45:42 sears
Speedups, most notably in the logging subsystem.
Revision 1.4 2004/10/18 18:24:51 sears
Preliminary version of logical logging linear hash. (No latching yet, and there are some bugs re-opening a closed hash.)

@@ -74,8 +77,10 @@ static int rcsid_fct() { return( rcsid ? 0 : rcsid_fct() ); }
/*****************************************************************************/
/* #defines */
/*****************************************************************************/
/*#define PBL_HASHTABLE_SIZE 1019*/
#define PBL_HASHTABLE_SIZE 2017
/*#define PBL_HASHTABLE_SIZE 1019 */
/*#define PBL_HASHTABLE_SIZE 2017*/
#define PBL_HASHTABLE_SIZE 2048
/* #define PBL_HASHTABLE_SIZE 5003 */
/*#define PBL_HASHTABLE_SIZE 100003 */
/*****************************************************************************/

@@ -140,9 +145,15 @@ typedef struct pbl_hashtable_s pbl_hashtable_t;
return( ret % PBL_HASHTABLE_SIZE );
}*/
#include <lladd/crc32.h>
static unsigned int hash( const unsigned char * key, size_t keylen ) {
return ((unsigned int)(crc32((char*)key, keylen, -1))) % PBL_HASHTABLE_SIZE;
}
/*static unsigned int hash( const unsigned char * key, size_t keylen ) {
if(keylen == sizeof(int)) {return *key &(PBL_HASHTABLE_SIZE-1);}//% PBL_HASHTABLE_SIZE;}
return ((unsigned int)(crc32((char*)key, keylen, -1))) & (PBL_HASHTABLE_SIZE-1); //% PBL_HASHTABLE_SIZE;
}*/
#define hash(x, y) (((keylen)==sizeof(int) ? \
(*(unsigned int*)key) & (PBL_HASHTABLE_SIZE-1) :\
((unsigned int)(crc32((char*)(key), (keylen), -1))) & (PBL_HASHTABLE_SIZE-1)))
/**
* create a new hash table

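The replacement hash() macro relies on PBL_HASHTABLE_SIZE being a power of two (2048), so the modulo becomes a bitwise AND, and it skips the CRC entirely for keys that are exactly sizeof(int) bytes, the common case for page ids. A hedged sketch of the same shortcut; toy_crc32() is a placeholder for whatever checksum the real table uses:

/* Sketch: cheap bucket selection for int-sized keys, a hash for the rest.
   TABLE_SIZE must be a power of two for the mask to equal the modulo. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TABLE_SIZE 2048   /* power of two, like the new PBL_HASHTABLE_SIZE */

/* Placeholder checksum; the real code calls its own crc32(). */
static uint32_t toy_crc32(const unsigned char *p, size_t n) {
  uint32_t h = 0xffffffffu;
  for(size_t i = 0; i < n; i++) { h = (h >> 8) ^ ((h ^ p[i]) * 2654435761u); }
  return h;
}

static unsigned int bucket_of(const unsigned char *key, size_t keylen) {
  if(keylen == sizeof(int)) {
    unsigned int k;
    memcpy(&k, key, sizeof(int));          /* int keys: use the value directly */
    return k & (TABLE_SIZE - 1);
  }
  return toy_crc32(key, keylen) & (TABLE_SIZE - 1);   /* others: hash then mask */
}

int main(void) {
  int pageid = 12345;
  const char *name = "some string key";
  printf("%u %u\n",
         bucket_of((const unsigned char*)&pageid, sizeof pageid),
         bucket_of((const unsigned char*)name, strlen(name)));
  return 0;
}

Note that the macro in the diff ignores its own x and y parameters and expands to the names key and keylen from the enclosing scope, which only works because every call site uses exactly those variable names.
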
@@ -272,7 +283,7 @@ size_t keylen /** length of that key */
for( item = bucket->head; item; item = item->bucketnext )
{
if(( item->keylen == keylen ) && !memcmp( item->key, key, keylen ))
if(( item->keylen == keylen ) && !memcmp( item->key, key, keylen ))
{
ht->current = item;
ht->currentdeleted = 0;

@@ -13,7 +13,7 @@ static char * logEntryToString(LogEntry * le) {
case UPDATELOG:
{
recordid rid = le->contents.clr.rid;
asprintf(&ret, "UPDATE\tlsn=%9ld\tprevlsn=%9ld\txid=%4d\trid={%5d %5d %5ld}\tfuncId=%3d\targSize=%9d\n", le->LSN, le->prevLSN, le->xid,
asprintf(&ret, "UPDATE\tlsn=%9ld\tprevlsn=%9ld\txid=%4d\trid={%8d %5d %5ld}\tfuncId=%3d\targSize=%9d\n", le->LSN, le->prevLSN, le->xid,
rid.page, rid.slot, rid.size, le->contents.update.funcID, le->contents.update.argSize );
}

@@ -43,7 +43,7 @@ static char * logEntryToString(LogEntry * le) {
case CLRLOG:
{
recordid rid = le->contents.clr.rid;
asprintf(&ret, "CLR \tlsn=%9ld\tprevlsn=%9ld\txid=%4d\trid={%5d %5d %5ld}\tthisUpdateLSN=%9ld\tundoNextLSN=%9ld\n", le->LSN, le->prevLSN, le->xid,
asprintf(&ret, "CLR \tlsn=%9ld\tprevlsn=%9ld\txid=%4d\trid={%8d %5d %5ld}\tthisUpdateLSN=%9ld\tundoNextLSN=%9ld\n", le->LSN, le->prevLSN, le->xid,
rid.page, rid.slot, rid.size, (long int)le->contents.clr.thisUpdateLSN, (long int)le->contents.clr.undoNextLSN );
}
break;