Got rid of another broken hash implementation...

Sears Russell 2005-01-31 01:34:29 +00:00
parent bb69197a65
commit 1260710a27
4 changed files with 4 additions and 436 deletions

@@ -9,9 +9,10 @@ liblladd_a_SOURCES=crc32.c common.c stats.c io.c bufferManager.c linkedlist.c op
operations/pageOperations.c page/indirect.c operations/decrement.c \
operations/increment.c operations/prepare.c operations/set.c \
operations/alloc.c operations/noop.c operations/instantSet.c \
-page/slotted.c operations/lladdhash.c page/header.c page/fixed.c \
+page/slotted.c page/header.c page/fixed.c \
operations/arrayList.c hash.c operations/linearHash.c \
operations/naiveLinearHash.c operations/nestedTopActions.c \
operations/linearHashNTA.c operations/linkedListNTA.c \
operations/pageOrientedListNTA.c
+#operations/lladdhash.c
AM_CFLAGS= -g -Wall -pedantic -std=gnu99

@@ -1,282 +0,0 @@
#include <config.h>
#include <lladd/common.h>
#include <lladd/operations.h>
#include <lladd/bufferManager.h>
#include <lladd/transactional.h>
#include "../page/indirect.h"
#include <assert.h>
#include <stdio.h>

const recordid ZERO_RID = {0,0,0};

/** All of this information is derived from the hashTables rid at
    runtime, or is specific to the piece of memory returned by
    lHtOpen() */
struct lladdHash_t {
  recordid hashTable;
  int iter_next_bucket;
  recordid iter_next_ptr;
};
recordid lHtCreate(int xid, int size) {
  return rallocMany(xid, sizeof(recordid), size);
}

/** @todo lHtDelete is unimplemented. First need to implement derallocMany() */
int lHtDelete(int xid, lladdHash_t *ht) {
  return 1;
}

lladdHash_t * lHtOpen(int xid, recordid rid) {
  lladdHash_t * ret = malloc(sizeof(lladdHash_t));
  ret->hashTable = rid;
  ret->iter_next_bucket = 0;
  ret->iter_next_ptr = ZERO_RID;
  return ret;
}

void lHtClose(int xid, lladdHash_t * lht) {
  free(lht);
}
/** @todo lHtValid could be more thorough. In particular, if we used
    arrays of fixed length pages, then we could cheaply verify that
    the entire hashbucket set had the correct size. */
int lHtValid(int xid, lladdHash_t *ht) {
  Page * p = loadPage(ht->hashTable.page);
  int ret = 1;
  if(*page_type_ptr(p) != INDIRECT_PAGE) {
    ret = 0;
  }
  if(ht->hashTable.slot == 0) {
    ht->hashTable.slot = 1;
    if(dereferenceRID(ht->hashTable).size != sizeof(recordid)) {
      ret = 0;
    }
    ht->hashTable.slot = 0;
  } else {
    ret = 1;
  }
  releasePage(p);
  return ret;
}

/**
 * Hash function generator, taken directly from pblhash
 */
static int hash( const unsigned char * key, size_t keylen, int size ) {
  int ret = 104729;
  for( ; keylen-- > 0; key++ )
  {
    if( *key )
    {
      ret *= *key + keylen;
      ret %= size;
    }
  }
  return( ret % size );
}
typedef struct {
  recordid data;
  recordid next;
  int keyLength;
} lht_entry_record;

static lht_entry_record * follow_overflow(int xid, lht_entry_record * entry) {
  if(!isNullRecord(entry->next)) {
    recordid next = entry->next;
    entry = malloc(next.size);
    Tread(xid, next, entry);
    return entry;
  } else {
    return NULL;
  }
}

static lht_entry_record * getEntry(int xid, recordid * entryRID, lladdHash_t * ht, const void * key, int keySize) {
  recordid bucket = ht->hashTable;
  lht_entry_record * entry;
  recordid tmp;
  bucket.slot = hash(key, keySize, indirectPageRecordCount(bucket));
  if(!entryRID) {
    entryRID = &tmp;
  }
  Tread(xid, bucket, entryRID);
  if(!isNullRecord(*entryRID)) {
    entry = malloc(entryRID->size);
    Tread(xid, *entryRID, entry);
  } else {
    entry = NULL;
  }
  while(entry && memcmp(entry+1, key, keySize)) {
    *entryRID = entry->next;
    if(!isNullRecord(*entryRID)) {
      lht_entry_record * newEntry = follow_overflow(xid, entry);
      free(entry);
      entry=newEntry;
    } else {
      entry=NULL;
    }
  }
  return entry;
}
/** Insert a new entry into the hashtable. The entry *must not* already exist. */
static void insert_entry(int xid, lladdHash_t * ht, const void * key, int keySize, recordid dat) {
  /* First, create the entry in memory. */
  recordid bucket = ht->hashTable;
  lht_entry_record * entry = malloc(sizeof(lht_entry_record) + keySize);
  bucket.slot = hash(key, keySize, indirectPageRecordCount(bucket));
  entry->data = dat;
  Tread(xid, bucket, &(entry->next));
  entry->keyLength = keySize;
  memcpy(entry+1, key, keySize);
  /* Now, write the changes to disk. */
  recordid entryRID = Talloc(xid, sizeof(lht_entry_record) + keySize);
  Tset(xid, entryRID, entry);
  Tset(xid, bucket, &entryRID);
  free(entry);
}

/** Assumes that the entry does, in fact, exist. */
static void delete_entry(int xid, lladdHash_t * ht, const void * key, int keySize) {
  lht_entry_record * entryToDelete;
  lht_entry_record * prevEntry = NULL;
  recordid prevEntryRID;
  recordid currentEntryRID;
  recordid nextEntryRID;
  recordid bucket = ht->hashTable;
  bucket.slot = hash(key, keySize, indirectPageRecordCount(bucket));
  Tread(xid, bucket, &currentEntryRID);
  entryToDelete = malloc(currentEntryRID.size);
  Tread(xid, currentEntryRID, entryToDelete);
  nextEntryRID = entryToDelete->next;
  while(memcmp(entryToDelete+1, key, keySize)) {
    if(prevEntry) {
      free(prevEntry);
    }
    prevEntry = entryToDelete;
    prevEntryRID = currentEntryRID;
    entryToDelete = follow_overflow(xid, entryToDelete);
    assert(entryToDelete);
    currentEntryRID = nextEntryRID;
    nextEntryRID = entryToDelete->next;
  }
  if(prevEntry) {
    prevEntry->next = nextEntryRID;
    Tset(xid, prevEntryRID, prevEntry);
    free(prevEntry);
  } else {
    Tset(xid, bucket, &nextEntryRID);
  }
  Tdealloc(xid, currentEntryRID);
  free(entryToDelete);
}
recordid lHtLookup( int xid, lladdHash_t *ht, const void *key, int keylen) {
  recordid ret;
  lht_entry_record * entry = getEntry(xid, NULL, ht, key, keylen);
  if(entry) {
    ret = entry->data;
    free(entry);
  } else {
    ret = ZERO_RID;
  }
  return ret;
}

recordid lHtInsert(int xid, lladdHash_t *ht, const void *key, int keylen, recordid dat) { /*{void *dat, long datlen) { */
  recordid entryRID;
  recordid ret;
  lht_entry_record * entry = getEntry(xid, &entryRID, ht, key, keylen);
  if(entry){
    /* assert(0); */
    ret = entry->data;
    entry->data = dat;
    Tset(xid, entryRID, entry);
  } else {
    insert_entry(xid, ht, key, keylen, dat);
    ret = ZERO_RID;
  }
  return ret;
}

/** @todo lHtRemove could be more efficient. Currently, it looks up
    the hash table entry twice to remove it. */
recordid lHtRemove( int xid, lladdHash_t *ht, const void *key, int keySize) {
  /* ret = lookup key */
  lht_entry_record * entry = getEntry(xid, NULL, ht, key, keySize);
  recordid data;
  if(entry) {
    data = entry->data;
    delete_entry(xid, ht, key, keySize);
  } else {
    data = ZERO_RID;
  }
  return data;
}
/** @todo hashtable iterators are currently unimplemented... */
int lHtPosition( int xid, lladdHash_t *ht, const void *key, int key_length ) {
  abort();
  return -1;
}

int lHtFirst( int xid, lladdHash_t *ht, void *buf ) {
  /* ht->iterIndex = 0;
     ht->iterData = NULL;
     return lHtNext( xid, ht, buf); */
  abort();
  return -1;
}

int lHtNext( int xid, lladdHash_t *ht, void *buf ) {
  abort();
  return -1;
}

int lHtCurrent(int xid, lladdHash_t *ht, void *buf) {
  abort();
  return -1;
}

int lHtCurrentKey(int xid, lladdHash_t *ht, void *buf) {
  abort();
  return -1;
}

int isNullRecord(recordid x) {
  return (((x).slot == 0) && ((x).page == 0) && ((x).size==0));
}

@@ -1,7 +1,8 @@
INCLUDES = @CHECK_CFLAGS@
if HAVE_CHECK
## Had to disable check_lht because lht needs to be rewritten.
-TESTS = check_logEntry check_logWriter check_page check_operations check_transactional2 check_recovery check_blobRecovery check_bufferManager check_indirect check_lladdhash check_pageOperations check_linearHash check_logicalLinearHash check_header check_linkedListNTA check_linearHashNTA check_pageOrientedList
+TESTS = check_logEntry check_logWriter check_page check_operations check_transactional2 check_recovery check_blobRecovery check_bufferManager check_indirect check_pageOperations check_linearHash check_logicalLinearHash check_header check_linkedListNTA check_linearHashNTA check_pageOrientedList
+#check_lladdhash
else
TESTS =
endif

@@ -1,152 +0,0 @@
/*---
This software is copyrighted by the Regents of the University of
California, and other parties. The following terms apply to all files
associated with the software unless explicitly disclaimed in
individual files.
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose,
provided that existing copyright notices are retained in all copies
and that this notice is included verbatim in any distributions. No
written agreement, license, or royalty fee is required for any of the
authorized uses. Modifications to this software may be copyrighted by
their authors and need not follow the licensing terms described here,
provided that the new terms are clearly indicated on the first page of
each file where they apply.
IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, AND
THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
GOVERNMENT USE: If you are acquiring this software on behalf of the
U.S. government, the Government shall have only "Restricted Rights" in
the software and related documentation as defined in the Federal
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you are
acquiring the software on behalf of the Department of Defense, the
software shall be classified as "Commercial Computer Software" and the
Government shall have only "Restricted Rights" as defined in Clause
252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
authors grant the U.S. Government and others acting in its behalf
permission to use and distribute the software in accordance with the
terms specified in this license.
---*/
#include <config.h>
#include <check.h>
#include "../check_includes.h"
#include <lladd/transactional.h>
#include <assert.h>

#define LOG_NAME "check_lladdhash.log"

/** @test
    executes each of the insert / remove / lookup operations a few times.
*/
#define NUM_BUCKETS 10
#define NUM_ENTRIES 100
START_TEST(simpleHashTest)
{
  Tinit();
  int xid = Tbegin();
  recordid hashRoot = lHtCreate(xid, NUM_BUCKETS);
  lladdHash_t * hash = lHtOpen(xid, hashRoot);

  for(int i = 0; i < NUM_ENTRIES; i++) {
    recordid rid;
    rid.page=i+1;
    rid.slot=i+1;
    rid.size=i+1;
    assert(isNullRecord(lHtInsert(xid, hash, &i, sizeof(int), rid)));
    assert(!isNullRecord(lHtInsert(xid, hash, &i, sizeof(int), rid)));
  }

  for(int i = 0; i < NUM_ENTRIES; i+=10) {
    recordid rid = lHtRemove(xid, hash, &i, sizeof(int));
    assert(rid.page == (i+1));
    assert(rid.slot == (i+1));
    assert(rid.size == (i+1));
    assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
  }

  for(int i = 0; i < NUM_ENTRIES; i++) {
    if(i % 10) {
      recordid rid = lHtLookup(xid, hash, &i, sizeof(int));
      assert(rid.page == (i+1));
      assert(rid.slot == (i+1));
      assert(rid.size == (i+1));
    } else {
      assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
    }
  }

  Tcommit(xid);
  xid = Tbegin();

  for(int i = 0; i < NUM_ENTRIES; i++) {
    if(i % 10) {
      recordid rid = lHtRemove(xid, hash, &i, sizeof(int));
      assert(rid.page == (i+1));
      assert(rid.slot == (i+1));
      assert(rid.size == (i+1));
    } else {
      assert(isNullRecord(lHtRemove(xid, hash, &i, sizeof(int))));
    }
  }

  for(int i = 0; i < NUM_ENTRIES; i++) {
    assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
  }

  Tabort(xid);

  for(int i = 0; i < NUM_ENTRIES; i++) {
    if(i % 10) {
      recordid rid = lHtLookup(xid, hash, &i, sizeof(int));
      assert(rid.page == (i+1));
      assert(rid.slot == (i+1));
      assert(rid.size == (i+1));
    } else {
      assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
    }
  }

  Tdeinit();
}
END_TEST
Suite * check_suite(void) {
  Suite *s = suite_create("lladdHash");
  /* Begin a new test */
  TCase *tc = tcase_create("simple");
  /* Sub tests are added, one per line, here */
  tcase_add_test(tc, simpleHashTest);
  /* --------------------------------------------- */
  tcase_add_checked_fixture(tc, setup, teardown);
  suite_add_tcase(s, tc);
  return s;
}

#include "../check_setup.h"