Added (untested) support for whole-page operations; lladdhash now works.
parent 20e78dd0db, commit 82e3fdb53a
21 changed files with 817 additions and 516 deletions
@@ -86,15 +86,20 @@ terms specified in this license.

/** Operation types */

#define NO_INVERSE -1
#define OPERATION_SET 0
#define OPERATION_INCREMENT 1
#define OPERATION_DECREMENT 2
#define OPERATION_ALLOC 3
#define OPERATION_PREPARE 4
#define OPERATION_LHINSERT 5
#define OPERATION_LHREMOVE 6
#define OPERATION_DEALLOC 7

#define NO_INVERSE_WHOLE_PAGE -2
#define NO_INVERSE -1
#define OPERATION_SET 0
#define OPERATION_INCREMENT 1
#define OPERATION_DECREMENT 2
#define OPERATION_ALLOC 3
#define OPERATION_PREPARE 4
#define OPERATION_LHINSERT 5
#define OPERATION_LHREMOVE 6
#define OPERATION_DEALLOC 7
#define OPERATION_PAGE_ALLOC 8
#define OPERATION_PAGE_DEALLOC 9
#define OPERATION_PAGE_SET 10

/* number above should be less than number below */
#define MAX_OPERATIONS 20
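
The removed and added versions of the operation-type constants appear back to back above; NO_INVERSE_WHOLE_PAGE is the addition that the rest of this commit builds on. The sketch below is illustrative only and not part of the commit: it restates how the three undo strategies implied by these constants are told apart, using operationsTable and its undo field from the hunks that follow.

#include <lladd/operations.h>

/* Illustrative helper, not in the commit: classify how an UPDATELOG
   entry logged for funcID will be rolled back. */
static const char * undo_strategy(int funcID) {
  int undo = operationsTable[funcID].undo;
  if(undo == NO_INVERSE)            return "physical undo from a record preimage";
  if(undo == NO_INVERSE_WHOLE_PAGE) return "physical undo from a whole-page preimage";
  return "logical undo via the inverse operation's funcID";
}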
@@ -136,6 +136,7 @@ typedef struct {

#include "operations/prepare.h"
#include "operations/lladdhash.h"
#include "operations/alloc.h"
#include "operations/pageOperations.h"

extern Operation operationsTable[]; /* [MAX_OPERATIONS]; memset somewhere */
@@ -26,53 +26,51 @@

*/

#include <lladd/operations.h>

#ifndef __LLADDHASH_H__
#define __LLADDHASH_H__

#include <lladd/operations.h>
int isNullRecord(recordid x);

#define MAX_LLADDHASHES 1000

typedef struct {
  recordid store;
  int keylen;
  long datlen;
  recordid next;
} lladdHashItem_t;

typedef struct {
  int size;
  recordid hashmap_record;
  /* recordid store; */
  int store;
  lladdHashItem_t *iterItem;
  unsigned int iterIndex;
  void *iterData;
  recordid* hashmap;
} lladdHash_t;
typedef struct lladdHash_t lladdHash_t;

/** Allocate a new hash */

lladdHash_t * lHtCreate(int xid, int size);
int lHtValid(int xid, lladdHash_t *ht);
int lHtLookup( int xid, lladdHash_t *ht, const void *key, int keylen, void *buf );
recordid lHtCreate(int xid, int size);
int lHtDelete(int xid, lladdHash_t *ht);

lladdHash_t * lHtOpen(int xid, recordid rid) ;
void lHtClose(int xid, lladdHash_t * lht);
int lHtValid(int xid, lladdHash_t *ht);

recordid lHtLookup( int xid, lladdHash_t *ht, const void *key, int keylen);
/**
   @return ZERO_RECORDID if the entry did not already exist, the
   recordid of the old value of key otherwise.
*/
recordid lHtInsert(int xid, lladdHash_t *ht, const void *key, int keylen, recordid dat);
/**
   The recommended code sequence for deletion of a value is this:

     recordid old = lHtRemove(xid, ht, key, keylen);
     if(old != ZERO_RECORDID) { Tdealloc(xid, old); }

   If you are certain that the value exists in the hashtable, then it
   is safe to skip the (old != ZERO_RECORDID) check.

   @return the recordid of the entry if it existed, ZERO_RECORDID otherwise.
*/
recordid lHtRemove( int xid, lladdHash_t *ht, const void *key, int keylen);

/*
int lHtFirst( int xid, lladdHash_t *ht, void *buf );
int lHtNext( int xid, lladdHash_t *ht, void *buf );
int lHtCurrent( int xid, lladdHash_t *ht, void *buf);
int lHtCurrentKey(int xid, lladdHash_t *ht, void *buf);
int lHtDelete(int xid, lladdHash_t *ht);
int lHtPosition( int xid, lladdHash_t *ht, const void *key, int key_length );
/* These two are the only ones that result in a log entry... */
/*
int _lHtInsert(int xid, lladdHash_t *ht, const void *key, int keylen, void * dat, long datlen);
int _lHtRemove( int xid, lladdHash_t *ht, const void *key, int keylen, void *buf );
*/

int lHtInsert(int xid, lladdHash_t *ht, const void *key, int keylen, void * dat, long datlen);
int lHtRemove( int xid, lladdHash_t *ht, const void *key, int keylen, void *buf, long buflen);

Operation getLHInsert();
Operation getLHRemove();

#endif
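
The hunk above keeps the old buffer-copying prototypes next to the new recordid-valued ones. Below is a hedged usage sketch of the new interface, modeled on check_lladdhash.c later in this commit; Tdealloc() is assumed from the header's own deletion example, and none of this code is part of the commit itself.

#include <assert.h>
#include <lladd/transactional.h>
#include <lladd/operations/lladdhash.h>

void lladdhash_example(void) {
  Tinit();
  int xid = Tbegin();

  recordid root    = lHtCreate(xid, 100);      /* allocate the bucket array */
  lladdHash_t * ht = lHtOpen(xid, root);       /* in-memory handle          */

  int key = 42;
  recordid val = Talloc(xid, sizeof(int));
  Tset(xid, val, &key);

  lHtInsert(xid, ht, &key, sizeof(key), val);  /* returns ZERO_RID if new   */
  recordid found = lHtLookup(xid, ht, &key, sizeof(key));
  assert(!isNullRecord(found));

  recordid old = lHtRemove(xid, ht, &key, sizeof(key));
  if(!isNullRecord(old)) { Tdealloc(xid, old); }  /* Tdealloc assumed per the header comment */

  lHtClose(xid, ht);
  Tcommit(xid);
  Tdeinit();
}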
lladd/operations/pageOperations.h (new file, 71 lines)
@@ -0,0 +1,71 @@
|
|||
/*---
|
||||
This software is copyrighted by the Regents of the University of
|
||||
California, and other parties. The following terms apply to all files
|
||||
associated with the software unless explicitly disclaimed in
|
||||
individual files.
|
||||
|
||||
The authors hereby grant permission to use, copy, modify, distribute,
|
||||
and license this software and its documentation for any purpose,
|
||||
provided that existing copyright notices are retained in all copies
|
||||
and that this notice is included verbatim in any distributions. No
|
||||
written agreement, license, or royalty fee is required for any of the
|
||||
authorized uses. Modifications to this software may be copyrighted by
|
||||
their authors and need not follow the licensing terms described here,
|
||||
provided that the new terms are clearly indicated on the first page of
|
||||
each file where they apply.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
|
||||
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
|
||||
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
|
||||
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
|
||||
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
|
||||
NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, AND
|
||||
THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE
|
||||
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
|
||||
GOVERNMENT USE: If you are acquiring this software on behalf of the
|
||||
U.S. government, the Government shall have only "Restricted Rights" in
|
||||
the software and related documentation as defined in the Federal
|
||||
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you are
|
||||
acquiring the software on behalf of the Department of Defense, the
|
||||
software shall be classified as "Commercial Computer Software" and the
|
||||
Government shall have only "Restricted Rights" as defined in Clause
|
||||
252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
|
||||
authors grant the U.S. Government and others acting in its behalf
|
||||
permission to use and distribute the software in accordance with the
|
||||
terms specified in this license.
|
||||
---*/
|
||||
|
||||
/**
 * @file
 * function definitions for increment
 *
 * Increment provides an example of a logical operation that does not
 * require any extra logging information, and (could someday) support
 * reordering.
 *
 * @see decrement.h
 *
 * @ingroup OPERATIONS
 *
 * $Id$
 */

#ifndef __PAGE_OPERATIONS_H__
#define __PAGE_OPERATIONS_H__

#include <lladd/operations.h>

int TpageAlloc(int xid, int type);
int TpageAllocMany(int xid, int count, int type);
int TpageDealloc(int xid, int pageid);
int TpageSet(int xid, int pageid, Page* dat);
Operation getPageAlloc();
Operation getPageDealloc();
Operation getPageSet();
#endif
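
A hedged usage sketch of the page-level interface declared above, not part of the commit: Tbegin()/Tcommit() come from transactional.h, and loadPage()/releasePage() plus SLOTTED_PAGE are internal names used elsewhere in this commit.

#include <lladd/transactional.h>
#include <lladd/operations/pageOperations.h>

void page_ops_example(void) {
  int xid = Tbegin();

  int pageid = TpageAlloc(xid, SLOTTED_PAGE);  /* logs OPERATION_PAGE_ALLOC          */

  Page * p = loadPage(pageid);
  TpageSet(xid, pageid, p);                    /* logs the page's current image      */
  releasePage(p);

  TpageDealloc(xid, pageid);                   /* logs OPERATION_PAGE_DEALLOC        */
  Tcommit(xid);
}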
@@ -95,6 +95,9 @@ typedef struct {
  long size;
} recordid;

extern const recordid ZERO_RID;

/**
   If a recordid's slot field is set to this, then the recordid
   represents an array of fixed-length records starting at slot zero
@@ -3,6 +3,6 @@
lib_LIBRARIES=liblladd.a
#liblladd_a_LIBADD=logger/liblogger.a operations/liboperations.a
# removed: recovery.c transactional.c logger.c logger/logparser.c logger/logstreamer.c
liblladd_a_SOURCES=common.c stats.c io.c bufferManager.c linkedlist.c operations.c pageFile.c pageCache.c page.c blobManager.c recovery2.c transactional2.c logger/logEntry.c logger/logWriter.c logger/logHandle.c logger/logger2.c operations/decrement.c operations/increment.c operations/prepare.c operations/set.c operations/alloc.c page/slotted.c page/indirect.c #operations/lladdhash.c
liblladd_a_SOURCES=common.c stats.c io.c bufferManager.c linkedlist.c operations.c pageFile.c pageCache.c page.c blobManager.c recovery2.c transactional2.c logger/logEntry.c logger/logWriter.c logger/logHandle.c logger/logger2.c operations/pageOperations.c page/indirect.c operations/decrement.c operations/increment.c operations/prepare.c operations/set.c operations/alloc.c page/slotted.c operations/lladdhash.c
AM_CFLAGS= -g -Wall -pedantic -std=gnu99
@@ -68,7 +68,8 @@ const byte * getUpdateArgs(const LogEntry * ret) {

const byte * getUpdatePreImage(const LogEntry * ret) {
  assert(ret->type == UPDATELOG);
  if(operationsTable[ret->contents.update.funcID].undo != NO_INVERSE) {
  if(operationsTable[ret->contents.update.funcID].undo != NO_INVERSE &&
     operationsTable[ret->contents.update.funcID].undo != NO_INVERSE_WHOLE_PAGE) {
    return NULL;
  } else {
    return ((byte*)ret) + sizeof(struct __raw_log_entry) + sizeof(UpdateLogEntry) + ret->contents.update.argSize;

@@ -79,8 +80,9 @@ LogEntry * allocUpdateLogEntry(lsn_t prevLSN, int xid,
                               unsigned int funcID, recordid rid,
                               const byte * args, unsigned int argSize, const byte * preImage) {
  int invertible = operationsTable[funcID].undo != NO_INVERSE;

  LogEntry * ret = malloc(sizeof(struct __raw_log_entry) + sizeof(UpdateLogEntry) + argSize + ((!invertible) ? rid.size : 0));
  int whole_page_phys = operationsTable[funcID].undo == NO_INVERSE_WHOLE_PAGE;
  LogEntry * ret = malloc(sizeof(struct __raw_log_entry) + sizeof(UpdateLogEntry) + argSize +
                          ((!invertible) ? rid.size : 0) + (whole_page_phys ? PAGE_SIZE : 0));
  ret->LSN = -1;
  ret->prevLSN = prevLSN;
  ret->xid = xid;

@@ -95,6 +97,9 @@ LogEntry * allocUpdateLogEntry(lsn_t prevLSN, int xid,
  if(!invertible) {
    memcpy((void*)getUpdatePreImage(ret), preImage, rid.size);
  }
  if(whole_page_phys) {
    memcpy((void*)getUpdatePreImage(ret), preImage, PAGE_SIZE);
  }

  return ret;

@@ -123,7 +128,8 @@ long sizeofLogEntry(const LogEntry * log) {
    return sizeof(struct __raw_log_entry) + sizeof(CLRLogEntry);
  case UPDATELOG:
    return sizeof(struct __raw_log_entry) + sizeof(UpdateLogEntry) + log->contents.update.argSize +
      ((operationsTable[log->contents.update.funcID].undo == NO_INVERSE) ? log->contents.update.rid.size : 0);
      ((operationsTable[log->contents.update.funcID].undo == NO_INVERSE) ? log->contents.update.rid.size : 0) +
      ((operationsTable[log->contents.update.funcID].undo == NO_INVERSE_WHOLE_PAGE) ? PAGE_SIZE : 0) ;
  default:
    return sizeof(struct __raw_log_entry);
  }
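
For reference, the UPDATELOG layout implied by the changes above is [__raw_log_entry][UpdateLogEntry][argSize bytes of arguments][preimage]. The helper below simply restates sizeofLogEntry()'s new arithmetic for a single entry; it is illustrative and not part of the commit.

/* Illustrative restatement of the UPDATELOG case of sizeofLogEntry(). */
static long example_update_entry_size(unsigned int funcID, unsigned int argSize, recordid rid) {
  long size = sizeof(struct __raw_log_entry) + sizeof(UpdateLogEntry) + argSize;
  if(operationsTable[funcID].undo == NO_INVERSE)            size += rid.size;  /* record preimage     */
  if(operationsTable[funcID].undo == NO_INVERSE_WHOLE_PAGE) size += PAGE_SIZE; /* whole-page preimage */
  return size;
}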
@@ -100,7 +100,13 @@ LogEntry * LogUpdate(TransactionLog * l, Page * p, recordid rid, int operation,
      if(!preImage) { perror("malloc"); abort(); }
      readRecord(l->xid, p, rid, preImage);
      DEBUG("got preimage");
    }
  } else if (operationsTable[operation].undo == NO_INVERSE_WHOLE_PAGE) {
    DEBUG("Logging entire page\n");
    preImage = malloc(PAGE_SIZE);
    if(!preImage) { perror("malloc"); abort(); }
    memcpy(preImage, p->memAddr, PAGE_SIZE);
    DEBUG("got preimage");
  }

  e = allocUpdateLogEntry(l->prevLSN, l->xid, operation, rid, args, argSize, preImage);
@@ -44,7 +44,7 @@ terms specified in this license.
#include "logger/logWriter.h"
#include <lladd/bufferManager.h>
#include <assert.h>

#include <string.h>
/** @todo questionable include */
#include "page.h"

@@ -120,6 +120,11 @@ void undoUpdate(const LogEntry * e, Page * p, lsn_t clr_lsn) {

    DEBUG("OPERATION Physical undo, %ld {%d %d %ld}\n", e->LSN, rid.page, rid.slot, rid.size);
    writeRecord(e->xid, p, clr_lsn, e->contents.update.rid, getUpdatePreImage(e));
  } else if(undo == NO_INVERSE_WHOLE_PAGE) {
    DEBUG("OPERATION Whole page physical undo, %ld {%d}\n", e->LSN, rid.page);
    memcpy(p->memAddr, getUpdatePreImage(e), PAGE_SIZE);
    pageWriteLSN(p, clr_lsn);

  } else {
    /* @see doUpdate() */
    /* printf("Logical undo"); fflush(NULL); */
@@ -81,6 +81,8 @@ recordid Talloc(int xid, long size) {

}


Operation getDealloc() {
  Operation o = {
    OPERATION_DEALLOC,
@ -2,72 +2,69 @@
|
|||
#include <lladd/common.h>
|
||||
|
||||
#include <lladd/operations/lladdhash.h>
|
||||
#include <lladd/operations/pageOperations.h>
|
||||
#include <lladd/bufferManager.h>
|
||||
#include <lladd/transactional.h>
|
||||
|
||||
#include "../page/indirect.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
static const recordid ZERO_RECORDID = {0,0,0};
|
||||
|
||||
typedef struct {
|
||||
int ht;
|
||||
size_t keylen;
|
||||
size_t datlen;
|
||||
} lladdHashRec_t;
|
||||
const recordid ZERO_RID = {0,0,0};
|
||||
|
||||
static lladdHash_t * lladdHashes[MAX_LLADDHASHES];
|
||||
int next_lladdHash = 0;
|
||||
/** All of this information is derived from the hashTables rid at
|
||||
runtime, or is specific to the piece of memory returned by
|
||||
lHtOpen() */
|
||||
struct lladdHash_t {
|
||||
recordid hashTable;
|
||||
int iter_next_bucket;
|
||||
recordid iter_next_ptr;
|
||||
};
|
||||
|
||||
lladdHash_t * lHtCreate(int xid, int size) {
|
||||
recordid lHtCreate(int xid, int size) {
|
||||
return rallocMany(xid, sizeof(recordid), size);
|
||||
|
||||
lladdHash_t *ht;
|
||||
|
||||
ht = lladdHashes[next_lladdHash] = (lladdHash_t*)malloc(sizeof(lladdHash_t));
|
||||
|
||||
|
||||
if( ht ) {
|
||||
recordid * hm = malloc(sizeof(recordid) * size);
|
||||
if(hm) {
|
||||
memset(hm, 0, sizeof(recordid)*size);
|
||||
|
||||
ht->size = size;
|
||||
ht->store = next_lladdHash; /*Talloc(xid, sizeof(lladdHash_t));*/
|
||||
ht->hashmap_record = Talloc(xid, sizeof(recordid) * size);
|
||||
/*ht->hashmap = NULL;*/ /* Always should be NULL in the store, so that we know if we need to read it in */
|
||||
/* Tset(xid, ht->store, ht); */
|
||||
ht->hashmap = hm;
|
||||
Tset(xid, ht->hashmap_record, ht->hashmap);
|
||||
ht->iterIndex = 0;
|
||||
ht->iterData = NULL;
|
||||
|
||||
next_lladdHash++;
|
||||
|
||||
return ht;
|
||||
} else {
|
||||
free(ht);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/** @todo lHtDelete is unimplemented. First need to implement derallocMany() */
|
||||
int lHtDelete(int xid, lladdHash_t *ht) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
|
||||
lladdHash_t * lHtOpen(int xid, recordid rid) {
|
||||
lladdHash_t * ret = malloc(sizeof(lladdHash_t));
|
||||
ret->hashTable = rid;
|
||||
ret->iter_next_bucket = 0;
|
||||
ret->iter_next_ptr = ZERO_RID;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void lHtClose(int xid, lladdHash_t * lht) {
|
||||
free(lht);
|
||||
}
|
||||
/** @todo lHtValid could be more thorough. In particular, if we used
|
||||
arrays of fixed length pages, then we could cheaply verify that
|
||||
the entire hashbucket set had the correct size. */
|
||||
int lHtValid(int xid, lladdHash_t *ht) {
|
||||
/*
|
||||
int ret;
|
||||
lladdHash_t *test ; = (lladdHash_t*)malloc(sizeof(lladdHash_t));
|
||||
Tread(xid, ht->store, test);
|
||||
ret = ( test->store.size == ht->store.size
|
||||
&& test->store.slot == ht->store.slot
|
||||
&& test->store.page == ht->store.page ); */
|
||||
/* TODO: Check hashmap_record? */
|
||||
/* free(test); */
|
||||
|
||||
assert(0); /* unimplemented! */
|
||||
|
||||
return 1;
|
||||
Page * p = loadPage(ht->hashTable.page);
|
||||
int ret = 1;
|
||||
if(*page_type_ptr(p) != INDIRECT_PAGE) {
|
||||
ret = 0;
|
||||
}
|
||||
if(ht->hashTable.slot == 0) {
|
||||
ht->hashTable.slot = 1;
|
||||
if(dereferenceRID(ht->hashTable).size != sizeof(recordid)) {
|
||||
ret = 0;
|
||||
}
|
||||
ht->hashTable.slot = 0;
|
||||
} else {
|
||||
ret = 1;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Hash function generator, taken directly from pblhash
|
||||
*/
|
||||
|
@ -86,370 +83,200 @@ static int hash( const unsigned char * key, size_t keylen, int size ) {
|
|||
return( ret % size );
|
||||
}
|
||||
|
||||
/** Should be called the first time ht->hashmap is accessed by a library function.
|
||||
Checks to see if the hashmap record has been read in, reads it if necessary, and then
|
||||
returns a pointer to it. */
|
||||
static recordid* _getHashMap(int xid, lladdHash_t *ht) {
|
||||
if(! ht->hashmap) {
|
||||
printf("Reading in hashmap.\n");
|
||||
ht->hashmap = malloc(sizeof(recordid) * ht->size);
|
||||
Tread(xid, ht->hashmap_record, ht->hashmap);
|
||||
typedef struct {
|
||||
recordid data;
|
||||
recordid next;
|
||||
int keyLength;
|
||||
} lht_entry_record;
|
||||
|
||||
static lht_entry_record * follow_overflow(int xid, lht_entry_record * entry) {
|
||||
if(!isNullRecord(entry->next)) {
|
||||
recordid next = entry->next;
|
||||
entry = malloc(next.size);
|
||||
Tread(xid, next, entry);
|
||||
return entry;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static lht_entry_record * getEntry(int xid, recordid * entryRID, lladdHash_t * ht, const void * key, int keySize) {
|
||||
|
||||
recordid bucket = ht->hashTable;
|
||||
lht_entry_record * entry;
|
||||
|
||||
recordid tmp;
|
||||
|
||||
bucket.slot = hash(key, keySize, indirectPageRecordCount(bucket));
|
||||
|
||||
if(!entryRID) {
|
||||
entryRID = &tmp;
|
||||
}
|
||||
|
||||
|
||||
return ht->hashmap;
|
||||
}
|
||||
/* TODO: Insert and Remove need to bypass Talloc(), so that recovery won't crash. (Otherwise, we double-free records...This
|
||||
was not noticed before, since recovery never freed pages.) */
|
||||
int _lHtInsert(int xid, recordid garbage, lladdHashRec_t * arg) {
|
||||
|
||||
/* recordid ht_rec = arg->ht; */
|
||||
|
||||
size_t keylen = arg->keylen;
|
||||
size_t datlen = arg->datlen;
|
||||
void * key = ((void*)arg) + sizeof(lladdHashRec_t);
|
||||
void * dat = ((void*)arg) + sizeof(lladdHashRec_t) + keylen;
|
||||
|
||||
|
||||
lladdHash_t * ht;
|
||||
int index;
|
||||
recordid rid;
|
||||
void *newd;
|
||||
lladdHashItem_t newi;
|
||||
|
||||
|
||||
// printf("Inserting %d -> %d\n", *(int*)key, *(int*)dat);
|
||||
|
||||
ht = lladdHashes[arg->ht];
|
||||
|
||||
/* Tread(xid, ht_rec, &ht); */
|
||||
|
||||
index = hash( key, keylen, ht->size);
|
||||
rid = _getHashMap(xid, ht)[index];
|
||||
Tread(xid, bucket, entryRID);
|
||||
|
||||
/* printf("Inserting %d -> %s %d {%d %d %d}\n", *(int*)key, dat, index, rid.page, rid.slot, rid.size); */
|
||||
|
||||
|
||||
if( rid.size == 0 ) { /* nothing with this hash has been inserted */
|
||||
|
||||
newi.store = Talloc(xid, sizeof(lladdHashItem_t)+keylen+datlen);
|
||||
newd = malloc(sizeof(lladdHashItem_t)+keylen+datlen);
|
||||
newi.keylen = keylen;
|
||||
newi.datlen = datlen;
|
||||
newi.next = ZERO_RECORDID;
|
||||
memcpy(newd, &newi, sizeof(lladdHashItem_t));
|
||||
memcpy(newd+sizeof(lladdHashItem_t), key, keylen);
|
||||
memcpy(newd+sizeof(lladdHashItem_t)+keylen, dat, datlen);
|
||||
writeRecord(xid, newi.store, newd);
|
||||
if(!isNullRecord(*entryRID)) {
|
||||
|
||||
ht->hashmap[index] = newi.store;
|
||||
/* Tset(xid, ht->store, ht); */
|
||||
/* printf("Writing hashmap slot {%d %d %d}[%d] = {%d,%d,%d}.\n",
|
||||
ht.hashmap_record.page,ht.hashmap_record.slot,ht.hashmap_record.size,
|
||||
index,
|
||||
ht.hashmap[index].page,ht.hashmap[index].slot,ht.hashmap[index].size); */
|
||||
writeRecord(xid, ht->hashmap_record, ht->hashmap);
|
||||
|
||||
free(newd);
|
||||
entry = malloc(entryRID->size);
|
||||
|
||||
Tread(xid, *entryRID, entry);
|
||||
} else {
|
||||
|
||||
void *item = NULL;
|
||||
|
||||
do {
|
||||
|
||||
free(item); /* NULL ignored by free */
|
||||
item = malloc(rid.size);
|
||||
Tread(xid, rid, item);
|
||||
if( ((lladdHashItem_t*)item)->keylen == keylen && !memcmp(key, item+sizeof(lladdHashItem_t), keylen)) {
|
||||
memcpy(item+sizeof(lladdHashItem_t)+keylen, dat, ((lladdHashItem_t*)item)->datlen);
|
||||
writeRecord(xid, ((lladdHashItem_t*)item)->store, item);
|
||||
free(item);
|
||||
return 0;
|
||||
}
|
||||
rid = ((lladdHashItem_t*)item)->next; /* could go off end of list */
|
||||
} while( ((lladdHashItem_t*)item)->next.size != 0 );
|
||||
/* now item is the tail */
|
||||
|
||||
newi.store = Talloc(xid, sizeof(lladdHashItem_t)+keylen+datlen);
|
||||
newd = malloc(sizeof(lladdHashItem_t)+keylen+datlen);
|
||||
newi.keylen = keylen;
|
||||
newi.datlen = datlen;
|
||||
newi.next = ZERO_RECORDID;
|
||||
memcpy(newd, &newi, sizeof(lladdHashItem_t));
|
||||
memcpy(newd+sizeof(lladdHashItem_t), key, keylen);
|
||||
memcpy(newd+sizeof(lladdHashItem_t)+keylen, dat, datlen);
|
||||
writeRecord(xid, newi.store, newd);
|
||||
|
||||
((lladdHashItem_t*)item)->next = newi.store;
|
||||
writeRecord(xid, ((lladdHashItem_t*)item)->store, item);
|
||||
free(item);
|
||||
free(newd);
|
||||
entry = NULL;
|
||||
}
|
||||
while(entry && memcmp(entry+1, key, keySize)) {
|
||||
*entryRID = entry->next;
|
||||
if(!isNullRecord(*entryRID)) {
|
||||
lht_entry_record * newEntry = follow_overflow(xid, entry);
|
||||
free(entry);
|
||||
entry=newEntry;
|
||||
} else {
|
||||
entry=NULL;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
/**Todo: ht->iterData is global to the hash table... seems like a bad idea! */
|
||||
int lHtPosition( int xid, lladdHash_t *ht, const void *key, int key_length ) {
|
||||
int index = hash(key, key_length, ht->size);
|
||||
|
||||
recordid rid = _getHashMap(xid, ht)[index];
|
||||
|
||||
if(rid.size == 0) {
|
||||
printf("rid,size = 0\n");
|
||||
return -1;
|
||||
} else {
|
||||
//void * item = NULL;
|
||||
lladdHashItem_t * item = malloc(rid.size);
|
||||
|
||||
|
||||
for(Tread(xid, rid, item) ;
|
||||
!(item->keylen == key_length && !memcmp(key, ((void*)item)+sizeof(lladdHashItem_t), key_length)) ;
|
||||
rid = item->next) {
|
||||
if(rid.size == 0) {
|
||||
printf("Found bucket, but item not here!\n");
|
||||
return -1; // Not in hash table.
|
||||
}
|
||||
free(item);
|
||||
item = malloc(rid.size);
|
||||
Tread(xid, rid, item);
|
||||
}
|
||||
/* item is what we want.. */
|
||||
ht->iterIndex = index+1; //iterIndex is the index of the next interesting hash bucket.
|
||||
ht->iterData = item; //Freed in lHtNext
|
||||
return 0;
|
||||
}
|
||||
|
||||
}
|
||||
int lHtLookup( int xid, lladdHash_t *ht, const void *key, int keylen, void *buf ) {
|
||||
|
||||
int index = hash(key, keylen, ht->size);
|
||||
recordid rid = _getHashMap(xid, ht)[index];
|
||||
/* printf("lookup: %d -> %d {%d %d %d} \n", *(int*)key, index, rid.page, rid.slot, rid.size); */
|
||||
if( rid.size == 0 ) { /* nothing inserted with this hash */
|
||||
return -1;
|
||||
} else {
|
||||
void *item = NULL;
|
||||
item = malloc(rid.size);
|
||||
Tread(xid, rid, item);
|
||||
|
||||
for( ; !(((lladdHashItem_t*)item)->keylen == keylen && !memcmp(key, item+sizeof(lladdHashItem_t), keylen));
|
||||
rid = ((lladdHashItem_t*)item)->next ) {
|
||||
if( rid.size == 0) { /* at the end of the list and not found */
|
||||
return -1;
|
||||
}
|
||||
free(item);
|
||||
item = malloc(rid.size);
|
||||
Tread(xid, rid, item);
|
||||
}
|
||||
/* rid is what we want */
|
||||
|
||||
memcpy(buf, item+sizeof(lladdHashItem_t)+((lladdHashItem_t*)item)->keylen, ((lladdHashItem_t*)item)->datlen);
|
||||
free(item);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int _lHtRemove( int xid, recordid garbage, lladdHashRec_t * arg) {
|
||||
|
||||
size_t keylen = arg->keylen;
|
||||
void * key = ((void*)arg) + sizeof(lladdHashRec_t);
|
||||
|
||||
lladdHash_t * ht = lladdHashes[arg->ht];
|
||||
|
||||
int index;
|
||||
recordid rid;
|
||||
|
||||
// printf("Removing %d\n", *(int*)key);
|
||||
|
||||
index = hash(key, keylen, ht->size);
|
||||
rid = _getHashMap(xid, ht)[index];
|
||||
|
||||
if( rid.size == 0) { /* nothing inserted with this hash */
|
||||
return -1;
|
||||
} else {
|
||||
void *del = malloc(rid.size);
|
||||
Tread(xid, rid, del);
|
||||
if( ((lladdHashItem_t*)del)->keylen == keylen && !memcmp(key, del+sizeof(lladdHashItem_t), keylen) ) {
|
||||
/* the head is the entry to be removed */
|
||||
/* if( buf ) {
|
||||
memcpy( buf, del+sizeof(lladdHashItem_t*)+keylen, ((lladdHashItem_t*)del)->datlen);
|
||||
} */
|
||||
ht->hashmap[index] = ((lladdHashItem_t*)del)->next;
|
||||
/* Tset(xid, ht->store, ht); */
|
||||
writeRecord(xid, ht->hashmap_record, ht->hashmap);
|
||||
|
||||
/* TODO: dealloc rid */
|
||||
return 0;
|
||||
} else {
|
||||
void * prevd = NULL;
|
||||
while( ((lladdHashItem_t*)del)->next.size ) {
|
||||
free(prevd); /* free will ignore NULL args */
|
||||
prevd = del;
|
||||
rid = ((lladdHashItem_t*)del)->next;
|
||||
del = malloc(rid.size);
|
||||
Tread(xid, rid, del);
|
||||
if( ((lladdHashItem_t*)del)->keylen == keylen && !memcmp(key, del+sizeof(lladdHashItem_t), keylen) ) {
|
||||
/* if( buf ) {
|
||||
memcpy( buf, del+sizeof(lladdHashItem_t)+keylen, ((lladdHashItem_t*)del)->datlen);
|
||||
} */
|
||||
((lladdHashItem_t*)prevd)->next = ((lladdHashItem_t*)del)->next;
|
||||
writeRecord(xid, ((lladdHashItem_t*)prevd)->store, prevd);
|
||||
/* TODO: dealloc rid */
|
||||
free(prevd);
|
||||
free(del);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
/* could not find exact key */
|
||||
|
||||
free(prevd);
|
||||
free(del);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
assert( 0 ); /* should not get here */
|
||||
return -1;
|
||||
}
|
||||
|
||||
int lHtFirst( int xid, lladdHash_t *ht, void *buf ) {
|
||||
|
||||
ht->iterIndex = 0;
|
||||
ht->iterData = NULL;
|
||||
return lHtNext( xid, ht, buf);
|
||||
}
|
||||
|
||||
int lHtNext( int xid, lladdHash_t *ht, void *buf ) {
|
||||
_getHashMap(xid, ht);
|
||||
if( ht->iterData && (((lladdHashItem_t*)(ht->iterData))->next.size != 0) ) {
|
||||
recordid next = ((lladdHashItem_t*)(ht->iterData))->next;
|
||||
free( ht->iterData );
|
||||
ht->iterData = malloc(next.size);
|
||||
Tread(xid, next, ht->iterData);
|
||||
} else {
|
||||
while(ht->iterIndex < ht->size) {
|
||||
if( ht->hashmap[ht->iterIndex].size )
|
||||
break;
|
||||
else
|
||||
ht->iterIndex++;
|
||||
}
|
||||
if( ht->iterIndex == ht->size) /* went through and found no data */
|
||||
return -1;
|
||||
|
||||
free( ht->iterData );
|
||||
ht->iterData = malloc(ht->hashmap[ht->iterIndex].size); /* to account for the last post incr */
|
||||
Tread(xid, ht->hashmap[ht->iterIndex++], ht->iterData); /* increment for next round */
|
||||
}
|
||||
|
||||
return lHtCurrent(xid, ht, buf);
|
||||
}
|
||||
|
||||
int lHtCurrent(int xid, lladdHash_t *ht, void *buf) {
|
||||
|
||||
if( ht->iterData ) {
|
||||
if(buf)
|
||||
memcpy(buf, ht->iterData + sizeof(lladdHashItem_t) + ((lladdHashItem_t*)(ht->iterData))->keylen, ((lladdHashItem_t*)(ht->iterData))->datlen);
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
int lHtCurrentKey(int xid, lladdHash_t *ht, void *buf) {
|
||||
|
||||
if( ht->iterData ) {
|
||||
memcpy(buf, ht->iterData + sizeof(lladdHashItem_t), ((lladdHashItem_t*)(ht->iterData))->keylen);
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
int lHtDelete(int xid, lladdHash_t *ht) {
|
||||
|
||||
/* deralloc ht->store */
|
||||
|
||||
if(ht->hashmap) { free(ht->hashmap); }
|
||||
free(ht);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int lHtInsert(int xid, lladdHash_t *ht, const void *key, int keylen, void *dat, long datlen) {
|
||||
recordid rid;
|
||||
void * log_r;
|
||||
lladdHashRec_t lir;
|
||||
rid.page = 0;
|
||||
rid.slot = 0;
|
||||
rid.size = sizeof(lladdHashRec_t) + keylen + datlen;
|
||||
|
||||
lir.ht = ht->store;
|
||||
lir.keylen = keylen;
|
||||
lir.datlen = datlen;
|
||||
|
||||
log_r = malloc(rid.size);
|
||||
memcpy(log_r, &lir, sizeof(lladdHashRec_t));
|
||||
memcpy(log_r+sizeof(lladdHashRec_t), key, keylen);
|
||||
memcpy(log_r+sizeof(lladdHashRec_t)+keylen, dat, datlen);
|
||||
|
||||
/* printf("Tupdating: %d -> %s\n", *(int*)key, dat); */
|
||||
|
||||
Tupdate(xid,rid,log_r, OPERATION_LHINSERT);
|
||||
return 0;
|
||||
|
||||
return entry;
|
||||
}
|
||||
int lHtRemove( int xid, lladdHash_t *ht, const void *key, int keylen, void *buf, long buflen ) {
|
||||
/** Insert a new entry into the hashtable. The entry *must not* already exist. */
|
||||
static void insert_entry(int xid, lladdHash_t * ht, const void * key, int keySize, recordid dat) {
|
||||
/* First, create the entry in memory. */
|
||||
|
||||
recordid rid;
|
||||
void * log_r;
|
||||
lladdHashRec_t lrr;
|
||||
int ret = lHtLookup(xid, ht, key, keylen, buf);
|
||||
|
||||
/* printf("Looked up: %d\n", *(int*)buf); */
|
||||
|
||||
if(ret >= 0) {
|
||||
rid.page = 0;
|
||||
rid.slot = 0;
|
||||
rid.size = sizeof(lladdHashRec_t) + keylen + buflen;
|
||||
recordid bucket = ht->hashTable;
|
||||
lht_entry_record * entry = malloc(sizeof(lht_entry_record) + keySize);
|
||||
bucket.slot = hash(key, keySize, indirectPageRecordCount(bucket));
|
||||
|
||||
entry->data = dat;
|
||||
Tread(xid, bucket, &(entry->next));
|
||||
entry->keyLength = keySize;
|
||||
memcpy(entry+1, key, keySize);
|
||||
|
||||
/* Now, write the changes to disk. */
|
||||
|
||||
recordid entryRID = Talloc(xid, sizeof(lht_entry_record) + keySize);
|
||||
Tset(xid, entryRID, entry);
|
||||
Tset(xid, bucket, &entryRID);
|
||||
|
||||
free(entry);
|
||||
|
||||
}
|
||||
/** Assumes that the entry does, in fact, exist. */
|
||||
static void delete_entry(int xid, lladdHash_t * ht, const void * key, int keySize) {
|
||||
|
||||
lht_entry_record * entryToDelete;
|
||||
lht_entry_record * prevEntry = NULL;
|
||||
recordid prevEntryRID;
|
||||
recordid currentEntryRID;
|
||||
recordid nextEntryRID;
|
||||
|
||||
recordid bucket = ht->hashTable;
|
||||
bucket.slot = hash(key, keySize, indirectPageRecordCount(bucket));
|
||||
|
||||
Tread(xid, bucket, ¤tEntryRID);
|
||||
|
||||
entryToDelete = malloc(currentEntryRID.size);
|
||||
Tread(xid, currentEntryRID, entryToDelete);
|
||||
nextEntryRID = entryToDelete->next;
|
||||
|
||||
while(memcmp(entryToDelete+1, key, keySize)) {
|
||||
|
||||
if(prevEntry) {
|
||||
free(prevEntry);
|
||||
}
|
||||
prevEntry = entryToDelete;
|
||||
prevEntryRID = currentEntryRID;
|
||||
|
||||
entryToDelete = follow_overflow(xid, entryToDelete);
|
||||
assert(entryToDelete);
|
||||
currentEntryRID = nextEntryRID;
|
||||
|
||||
lrr.ht = ht->store;
|
||||
lrr.keylen = keylen;
|
||||
lrr.datlen = buflen;
|
||||
|
||||
log_r = malloc(sizeof(lladdHashRec_t) + keylen + buflen);
|
||||
memcpy(log_r, &lrr, sizeof(lladdHashRec_t));
|
||||
memcpy(log_r+sizeof(lladdHashRec_t), key, keylen);
|
||||
memcpy(log_r+sizeof(lladdHashRec_t)+keylen, buf, buflen);
|
||||
nextEntryRID = entryToDelete->next;
|
||||
}
|
||||
|
||||
lrr.datlen = buflen;
|
||||
if(prevEntry) {
|
||||
prevEntry->next = nextEntryRID;
|
||||
Tset(xid, prevEntryRID, prevEntry);
|
||||
free(prevEntry);
|
||||
} else {
|
||||
Tset(xid, bucket, &nextEntryRID);
|
||||
}
|
||||
Tdealloc(xid, currentEntryRID);
|
||||
free(entryToDelete);
|
||||
|
||||
Tupdate(xid,rid,log_r, OPERATION_LHREMOVE);
|
||||
|
||||
free (log_r);
|
||||
}
|
||||
|
||||
recordid lHtLookup( int xid, lladdHash_t *ht, const void *key, int keylen) {
|
||||
recordid ret;
|
||||
lht_entry_record * entry = getEntry(xid, NULL, ht, key, keylen);
|
||||
if(entry) {
|
||||
ret = entry->data;
|
||||
free(entry);
|
||||
} else {
|
||||
ret = ZERO_RID;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
Operation getLHInsert() {
|
||||
Operation o = {
|
||||
OPERATION_LHINSERT,
|
||||
SIZEOF_RECORD, /* use the size of the record as size of arg (nasty, ugly evil hack, since we end up passing in record = {0, 0, sizeof() */
|
||||
OPERATION_LHREMOVE,
|
||||
(Function)&_lHtInsert
|
||||
};
|
||||
return o;
|
||||
|
||||
|
||||
recordid lHtInsert(int xid, lladdHash_t *ht, const void *key, int keylen, recordid dat) { /*{void *dat, long datlen) { */
|
||||
recordid entryRID;
|
||||
recordid ret;
|
||||
lht_entry_record * entry = getEntry(xid, &entryRID, ht, key, keylen);
|
||||
if(entry){
|
||||
/* assert(0); */
|
||||
ret = entry->data;
|
||||
entry->data = dat;
|
||||
Tset(xid, entryRID, entry);
|
||||
} else {
|
||||
insert_entry(xid, ht, key, keylen, dat);
|
||||
ret = ZERO_RID;
|
||||
}
|
||||
return ret;
|
||||
|
||||
}
|
||||
/** @todo lHtRemove could be more efficient. Currently, it looks up
|
||||
the hash table entry twice to remove it. */
|
||||
recordid lHtRemove( int xid, lladdHash_t *ht, const void *key, int keySize) {
|
||||
|
||||
/* ret = lookup key */
|
||||
lht_entry_record * entry = getEntry(xid, NULL, ht, key, keySize);
|
||||
recordid data;
|
||||
|
||||
if(entry) {
|
||||
data = entry->data;
|
||||
|
||||
delete_entry(xid, ht, key, keySize);
|
||||
|
||||
} else {
|
||||
data = ZERO_RID;
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
Operation getLHRemove() {
|
||||
Operation o = {
|
||||
OPERATION_LHREMOVE,
|
||||
SIZEOF_RECORD, /* use the size of the record as size of arg (nasty, ugly evil hack.) */
|
||||
OPERATION_LHINSERT,
|
||||
(Function)&_lHtRemove
|
||||
};
|
||||
return o;
|
||||
|
||||
/** @todo hashtable iterators are currently unimplemented... */
|
||||
int lHtPosition( int xid, lladdHash_t *ht, const void *key, int key_length ) {
|
||||
abort();
|
||||
return -1;
|
||||
}
|
||||
int lHtFirst( int xid, lladdHash_t *ht, void *buf ) {
|
||||
|
||||
/* ht->iterIndex = 0;
|
||||
ht->iterData = NULL;
|
||||
return lHtNext( xid, ht, buf); */
|
||||
abort();
|
||||
return -1;
|
||||
}
|
||||
int lHtNext( int xid, lladdHash_t *ht, void *buf ) {
|
||||
abort();
|
||||
return -1;
|
||||
}
|
||||
int lHtCurrent(int xid, lladdHash_t *ht, void *buf) {
|
||||
abort();
|
||||
return -1;
|
||||
}
|
||||
int lHtCurrentKey(int xid, lladdHash_t *ht, void *buf) {
|
||||
abort();
|
||||
return -1;
|
||||
}
|
||||
int isNullRecord(recordid x) {
|
||||
return (((x).slot == 0) && ((x).page == 0) && ((x).size==0));
|
||||
}
|
||||
|
|
src/lladd/operations/pageOperations.c (new file, 115 lines)
@@ -0,0 +1,115 @@
#include "../page.h"
#include <lladd/operations/pageOperations.h>
#include <assert.h>
int __pageAlloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
  int type = *(int*)d;

  *page_type_ptr(p) = type;

  return 0;

}

int __pageDealloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
  *page_type_ptr(p) = UNINITIALIZED_PAGE;
  return 0;
}

int __pageSet(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
  memcpy(p->memAddr, d, PAGE_SIZE);
  pageWriteLSN(p, lsn);
  return 0;
}
int TpageSet(int xid, int pageid, Page* p) {
  recordid rid;
  rid.page = pageid;
  rid.slot = 0;
  rid.size = 0;
  Tupdate(xid,rid,p->memAddr, OPERATION_PAGE_SET);
  return 0;
}

/** @todo Need to re-think TpageDealloc/TpageAlloc's logging
    strategies when we implement page re-use. Currently, TpageDealloc can
    use logical logging. Perhaps TpageDealloc should use physical
    logging, and wipe the page to zero, while pageAlloc should continue to
    use logical logging. (Have we ever had operations whose inverses
    took different types of log entries? Do such operations work?) */

int TpageDealloc(int xid, int pageid) {
  recordid rid;
  rid.page = pageid;
  rid.slot = 0;
  rid.size = 0;

  Page * p = loadPage(pageid);
  int type = *page_type_ptr(p);
  releasePage(p);

  Tupdate(xid, rid, &type, OPERATION_PAGE_DEALLOC);
  return 0;
}

int TpageAlloc(int xid, int type) {
  recordid rid;

  int pageid = pageAllocMultiple(1);

  rid.page = pageid;
  rid.slot = 0;
  rid.size = 0;

  Tupdate(xid, rid, &type, OPERATION_PAGE_ALLOC);
  return pageid;
}
/** Allocs an extent of pages. @todo CONCURRENCY BUG TpageAllocMany
    can not be concurrent until ralloc uses TpageAlloc to allocate new
    records. (And concurrency for TpageAllocMany hasn't been
    implemented yet...)
*/
int TpageAllocMany(int xid, int count, int type) {
  int firstPage;
  int lastPage = -1;
  for(int i = 0 ; i < count; i++) {
    int thisPage = TpageAlloc(xid, type);
    if(lastPage == -1) {
      firstPage = lastPage = thisPage;
    } else {
      assert(lastPage +1 == thisPage);
      lastPage = thisPage;
    }
  }
  return firstPage;
}

Operation getPageAlloc() {
  Operation o = {
    OPERATION_PAGE_ALLOC,
    sizeof(int),
    OPERATION_PAGE_DEALLOC,
    &__pageAlloc
  };
  return o;
}

Operation getPageDealloc() {
  Operation o = {
    OPERATION_PAGE_DEALLOC,
    sizeof(int),
    OPERATION_PAGE_ALLOC,
    &__pageDealloc
  };
  return o;
}

Operation getPageSet() {
  Operation o = {
    OPERATION_PAGE_SET,
    PAGE_SIZE, /* This is the type of the old page, for undo purposes */
    /*OPERATION_PAGE_SET, */ NO_INVERSE_WHOLE_PAGE,
    &__pageSet
  };
  return o;
}
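
A hedged sketch of how TpageSet() is meant to be used, mirroring __rallocMany() later in this commit: the caller builds the new page image in a local buffer, and because getPageSet()'s undo is NO_INVERSE_WHOLE_PAGE, LogUpdate() saves the buffer manager's old copy of the page so that an abort can copy it back. This function is illustrative, not part of the commit.

#include <string.h>

void install_page_image(int xid, int pageid) {
  Page p;
  byte buffer[PAGE_SIZE];
  p.memAddr = buffer;

  memset(buffer, 0, PAGE_SIZE);  /* build the new page contents here     */
  TpageSet(xid, pageid, &p);     /* logged with a whole-page preimage    */
}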
@@ -221,11 +221,18 @@ void pageCommit(int xid) {
void pageAbort(int xid) {
}

/**
    @todo DATA CORRUPTION BUG pageAllocMultiple needs to scan forward in the store file until
    it finds page(s) with type = UNINITIALIZED_PAGE. Otherwise, after recovery, it will trash the storefile.

    A better way to implement this is probably to reserve the first
    slot of the first page in the storefile for metadata, and to keep
    lastFreepage there, instead of in RAM.
*/
int pageAllocMultiple(int newPageCount) {
  pthread_mutex_lock(&lastFreepage_mutex);
  int ret = lastFreepage+1;
  lastFreepage += newPageCount;
  int ret = lastFreepage+1; /* Currently, just discard the current page. */
  lastFreepage += (newPageCount + 1);
  pthread_mutex_unlock(&lastFreepage_mutex);
  return ret;
}
@@ -278,6 +278,8 @@ void pageRealloc(Page * p, int id);

/** Allocates a set of contiguous pages on disk. Has nothing to do with pageAlloc.
    @todo need a better naming convention for pageAlloc (alloc's memory) and pageAllocMultiple (alloc's disk)

    @todo is there any case where this function can safely be called with newPageCount > 1?
*/
int pageAllocMultiple(int newPageCount) ;
@ -5,6 +5,9 @@
|
|||
#include <assert.h>
|
||||
#include "../blobManager.h"
|
||||
#include "../page.h"
|
||||
|
||||
#include <lladd/operations.h>
|
||||
|
||||
void indirectInitialize(Page * p, int height) {
|
||||
*level_ptr(p) = height;
|
||||
*page_type_ptr(p) = INDIRECT_PAGE;
|
||||
|
@ -53,7 +56,17 @@ unsigned int calculate_level (unsigned int number_of_pages) {
|
|||
return level;
|
||||
}
|
||||
|
||||
recordid rallocMany(int parentPage, lsn_t lsn, int recordSize, int recordCount) {
|
||||
recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount);
|
||||
/**
|
||||
@todo is there a way to implement rallocMany so that it doesn't
|
||||
have to physically log pre- and post-images of the allocated space?
|
||||
*/
|
||||
recordid rallocMany(int xid, int recordSize, int recordCount) {
|
||||
int page = TpageAlloc(xid, SLOTTED_PAGE);
|
||||
return __rallocMany(xid, page, recordSize, recordCount);
|
||||
}
|
||||
|
||||
recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount) {
|
||||
|
||||
/* How many levels of pages do we need? */
|
||||
|
||||
|
@ -70,7 +83,14 @@ recordid rallocMany(int parentPage, lsn_t lsn, int recordSize, int recordCount)
|
|||
/ (physical_size + SLOTTED_PAGE_OVERHEAD_PER_RECORD); /* we need to take the floor */
|
||||
|
||||
int number_of_pages = (int)ceil( (double)recordCount / (double)records_per_page); /* need to take ceiling here */
|
||||
|
||||
|
||||
Page p;
|
||||
byte buffer[PAGE_SIZE];
|
||||
p.memAddr = buffer;
|
||||
p.rwlatch = initlock();
|
||||
p.loadlatch = initlock();
|
||||
|
||||
|
||||
if(number_of_pages > 1) {
|
||||
|
||||
int level = calculate_level(number_of_pages);
|
||||
|
@ -86,13 +106,14 @@ recordid rallocMany(int parentPage, lsn_t lsn, int recordSize, int recordCount)
|
|||
}
|
||||
|
||||
int newPageCount = (int)ceil((double)recordCount / (double)next_level_records_per_page);
|
||||
int firstChildPage = pageAllocMultiple(newPageCount);
|
||||
|
||||
int firstChildPage = TpageAllocMany(xid, newPageCount, SLOTTED_PAGE);/*pageAllocMultiple(newPageCount); */
|
||||
int tmpRecordCount = recordCount;
|
||||
int thisChildPage = firstChildPage;
|
||||
|
||||
while(tmpRecordCount > 0) {
|
||||
|
||||
rallocMany(thisChildPage, lsn, recordSize, min(tmpRecordCount, next_level_records_per_page));
|
||||
__rallocMany(xid, thisChildPage, recordSize, min(tmpRecordCount, next_level_records_per_page));
|
||||
tmpRecordCount -= next_level_records_per_page;
|
||||
thisChildPage ++;
|
||||
|
||||
|
@ -102,60 +123,78 @@ recordid rallocMany(int parentPage, lsn_t lsn, int recordSize, int recordCount)
|
|||
|
||||
tmpRecordCount = recordCount;
|
||||
|
||||
Page * p = loadPage(parentPage);
|
||||
|
||||
writelock(p->rwlatch, 99);
|
||||
|
||||
indirectInitialize(p, level);
|
||||
indirectInitialize(&p, level);
|
||||
|
||||
int i = 0;
|
||||
|
||||
for(tmpRecordCount = recordCount; tmpRecordCount > 0; tmpRecordCount -= next_level_records_per_page) {
|
||||
|
||||
*page_ptr(p, i) = firstChildPage + i;
|
||||
*page_ptr(&p, i) = firstChildPage + i;
|
||||
if(i) {
|
||||
*maxslot_ptr(p, i) = *maxslot_ptr(p, i-1) + min(tmpRecordCount, next_level_records_per_page);
|
||||
*maxslot_ptr(&p, i) = *maxslot_ptr(&p, i-1) + min(tmpRecordCount+1, next_level_records_per_page);
|
||||
} else {
|
||||
*maxslot_ptr(p, i) = min(tmpRecordCount, next_level_records_per_page);
|
||||
*maxslot_ptr(&p, i) = min(tmpRecordCount+1, next_level_records_per_page);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
assert(i == newPageCount);
|
||||
|
||||
pageWriteLSN(p, lsn);
|
||||
|
||||
unlock(p->rwlatch);
|
||||
releasePage(p);
|
||||
|
||||
rid.page = parentPage;
|
||||
rid.slot = RECORD_ARRAY;
|
||||
rid.size = recordSize;
|
||||
|
||||
} else {
|
||||
DEBUG("recordsize = %d, recordCount = %d, level = 0 (don't need indirect pages)\n", recordSize, recordCount);
|
||||
|
||||
Page * p = loadPage(parentPage);
|
||||
|
||||
writelock(p->rwlatch, 127);
|
||||
|
||||
pageInitialize(p);
|
||||
|
||||
unlock(p->rwlatch);
|
||||
|
||||
pageInitialize(&p);
|
||||
p.id = parentPage;
|
||||
for(int i = 0; i < recordCount; i++) {
|
||||
pageRalloc(p, recordSize);
|
||||
/* Normally, we would worry that the page id isn't set, but
|
||||
we're discarding the recordid returned by page ralloc
|
||||
anyway. */
|
||||
pageRalloc(&p, recordSize);
|
||||
}
|
||||
|
||||
writelock(p->rwlatch, 127);
|
||||
pageWriteLSN(p, lsn);
|
||||
unlock(p->rwlatch);
|
||||
|
||||
|
||||
releasePage(p);
|
||||
rid.page = parentPage;
|
||||
rid.slot = RECORD_ARRAY;
|
||||
rid.size = recordSize;
|
||||
}
|
||||
|
||||
TpageSet(xid, parentPage, &p);
|
||||
|
||||
rid.page = parentPage;
|
||||
rid.slot = RECORD_ARRAY;
|
||||
rid.size = recordSize;
|
||||
|
||||
deletelock(p.rwlatch);
|
||||
deletelock(p.loadlatch);
|
||||
|
||||
return rid;
|
||||
}
|
||||
|
||||
unsigned int indirectPageRecordCount(recordid rid) {
|
||||
Page * p = loadPage(rid.page);
|
||||
int i = 0;
|
||||
unsigned int ret;
|
||||
if(*page_type_ptr(p) == INDIRECT_PAGE) {
|
||||
|
||||
while(*maxslot_ptr(p, i) > 0) {
|
||||
i++;
|
||||
}
|
||||
if(!i) {
|
||||
ret = 0;
|
||||
} else {
|
||||
ret = (*maxslot_ptr(p, i-1)) - 1;
|
||||
}
|
||||
} else if (*page_type_ptr(p) == SLOTTED_PAGE) {
|
||||
|
||||
int numslots = *numslots_ptr(p);
|
||||
ret = 0;
|
||||
for(int i = 0; i < numslots; i++) {
|
||||
if(isValidSlot(p, i)) {
|
||||
ret++;
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
printf("Unknown page type in indirectPageRecordCount()\n");
|
||||
fflush(NULL);
|
||||
abort();
|
||||
}
|
||||
releasePage(p);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -45,7 +45,8 @@ BEGIN_C_DECLS
*/
recordid dereferenceRID(recordid rid);
void indirectInitialize(Page * p, int height);
recordid rallocMany(int parentPage, lsn_t lsn, int recordSize, int recordCount);
recordid rallocMany(/*int parentPage, lsn_t lsn,*/int xid, int recordSize, int recordCount);
unsigned int indirectPageRecordCount(recordid rid);

END_C_DECLS
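
A hedged usage sketch of the new rallocMany() signature, following check_indirect.c from this commit; it is not part of the commit itself, and the include path is assumed.

#include <assert.h>
#include <lladd/transactional.h>
#include "page/indirect.h"

void indirect_example(void) {
  int xid = Tbegin();

  /* 5000 int-sized records do not fit on one slotted page, so an
     INDIRECT_PAGE tree is built and rid.slot is RECORD_ARRAY. */
  recordid rid = rallocMany(xid, sizeof(int), 5000);
  assert(indirectPageRecordCount(rid) == 5000);

  for(int i = 0; i < 5000; i++) {
    rid.slot = i;
    Tset(xid, dereferenceRID(rid), &i);  /* resolve the slot, then write */
  }
  Tcommit(xid);
}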
@@ -207,6 +207,8 @@ recordid pageSlotRalloc(Page * page, lsn_t lsn, recordid rid) {

  }

  pageWriteLSN(page, lsn);

  writeunlock(page->rwlatch);

  return rid;
@@ -11,11 +11,13 @@

#include <stdio.h>
#include <assert.h>
#include "page/indirect.h"

TransactionLog XactionTable[MAX_TRANSACTIONS];
int numActiveXactions = 0;
int xidCount = 0;

/**
    Locking for transactional2.c works as follows:

@@ -34,11 +36,15 @@ void setupOperationsTable() {
  operationsTable[OPERATION_SET] = getSet();
  operationsTable[OPERATION_INCREMENT] = getIncrement();
  operationsTable[OPERATION_DECREMENT] = getDecrement();
  operationsTable[OPERATION_ALLOC] = getAlloc();
  operationsTable[OPERATION_PREPARE] = getPrepare();
  /* operationsTable[OPERATION_LHINSERT] = getLHInsert();
     operationsTable[OPERATION_LHREMOVE] = getLHRemove(); */
  operationsTable[OPERATION_ALLOC] = getAlloc();
  operationsTable[OPERATION_DEALLOC] = getDealloc();
  operationsTable[OPERATION_PAGE_ALLOC] = getPageAlloc();
  operationsTable[OPERATION_PAGE_DEALLOC] = getPageDealloc();
  operationsTable[OPERATION_PAGE_SET] = getPageSet();

}

@@ -105,6 +111,12 @@ void Tupdate(int xid, recordid rid, const void *dat, int op) {

  p = loadPage(rid.page);

  if(*page_type_ptr(p) == INDIRECT_PAGE) {
    releasePage(p);
    rid = dereferenceRID(rid);
    p = loadPage(rid.page);
  }

  e = LogUpdate(&XactionTable[xid % MAX_TRANSACTIONS], p, rid, op, dat);

  assert(XactionTable[xid % MAX_TRANSACTIONS].prevLSN == e->LSN);

@@ -120,7 +132,16 @@ void Tupdate(int xid, recordid rid, const void *dat, int op) {

void Tread(int xid, recordid rid, void * dat) {
  Page * p = loadPage(rid.page);
  readRecord(xid, p, rid, dat);
  if(*page_type_ptr(p) == SLOTTED_PAGE) {
    readRecord(xid, p, rid, dat);
  } else if(*page_type_ptr(p) == INDIRECT_PAGE) {
    releasePage(p);
    rid = dereferenceRID(rid);
    p = loadPage(rid.page);
    readRecord(xid, p, rid, dat);
  } else {
    abort();
  }
  releasePage(p);
}
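
With the Tread() change above, a slot of a rallocMany() array can be read without dereferencing the indirect tree by hand, since Tread() now notices an INDIRECT_PAGE and calls dereferenceRID() itself. A minimal sketch, not part of the commit:

void read_array_slot(int xid, recordid array, int slot, int * out) {
  array.slot = slot;       /* select the logical record in the array     */
  Tread(xid, array, out);  /* indirect pages are dereferenced internally */
}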
@@ -1,11 +1,12 @@
INCLUDES = @CHECK_CFLAGS@
if HAVE_CHECK
## Had to disable check_lht because lht needs to be rewritten.
TESTS = check_logEntry check_logWriter check_page check_operations check_transactional2 check_recovery check_blobRecovery check_bufferManager check_indirect
TESTS = check_logEntry check_logWriter check_page check_operations check_transactional2 check_recovery check_blobRecovery check_bufferManager check_indirect check_lladdhash
else
TESTS =
endif
noinst_PROGRAMS = $(TESTS)
LDADD = @CHECK_LIBS@ $(top_builddir)/src/lladd/liblladd.a $(top_builddir)/src/pbl/libpbl.a $(top_builddir)/src/libdfa/librw.a #-lefence
CLEANFILES = check_lht.log check_logEntry.log storefile.txt logfile.txt blob0_file.txt blob1_file.txt check_blobRecovery.log check_logWriter.log check_operations.log check_recovery.log check_transactional2.log check_page.log check_bufferManager.log check_indirect.log
CLEANFILES = check_lht.log check_logEntry.log storefile.txt logfile.txt blob0_file.txt blob1_file.txt check_blobRecovery.log check_logWriter.log check_operations.log check_recovery.log check_transactional2.log check_page.log check_bufferManager.log check_indirect.log check_bufferMananger.log check_lladdhash.log
AM_CFLAGS= -g -Wall -pedantic -std=gnu99
|
@ -88,13 +88,15 @@ START_TEST(indirectCalculateLevelTest)
|
|||
}
|
||||
END_TEST
|
||||
|
||||
|
||||
START_TEST(indirectAlloc) {
|
||||
Tinit();
|
||||
int xid = Tbegin();
|
||||
int page;
|
||||
recordid rid = rallocMany(xid, 1, 255);
|
||||
|
||||
page = rid.page;
|
||||
|
||||
int page = pageAllocMultiple(1);
|
||||
|
||||
recordid rid = rallocMany(page, 1, 1, 255);
|
||||
|
||||
fail_unless(rid.page == page, NULL);
|
||||
fail_unless(rid.slot == RECORD_ARRAY, NULL);
|
||||
fail_unless(rid.size == 1, NULL);
|
||||
|
@ -111,9 +113,10 @@ START_TEST(indirectAlloc) {
|
|||
|
||||
/* ------------------------------- */
|
||||
|
||||
page = pageAllocMultiple(1);
|
||||
|
||||
rid = rallocMany(page, 1, 2000, 255);
|
||||
rid = rallocMany(xid, 2000, 255);
|
||||
|
||||
page = rid.page;
|
||||
|
||||
fail_unless(rid.page == page, NULL);
|
||||
fail_unless(rid.slot == RECORD_ARRAY, NULL);
|
||||
|
@ -135,9 +138,9 @@ START_TEST(indirectAlloc) {
|
|||
|
||||
/*----------------- */
|
||||
|
||||
page = pageAllocMultiple(1);
|
||||
rid = rallocMany(xid, 2, 1000000);
|
||||
|
||||
rid = rallocMany(page, 1, 2, 1000000);
|
||||
page = rid.page;
|
||||
|
||||
fail_unless(rid.page == page, NULL);
|
||||
fail_unless(rid.slot == RECORD_ARRAY, NULL);
|
||||
|
@ -149,25 +152,29 @@ START_TEST(indirectAlloc) {
|
|||
|
||||
assert(page_type == INDIRECT_PAGE);
|
||||
|
||||
fail_unless(page_type == INDIRECT_PAGE, NULL);
|
||||
|
||||
fail_unless(page_type == INDIRECT_PAGE, NULL);
|
||||
|
||||
|
||||
|
||||
printf("{page = %d, slot = %d, size = %ld}\n", rid.page, rid.slot, rid.size);
|
||||
|
||||
releasePage(p);
|
||||
|
||||
Tcommit(xid);
|
||||
|
||||
Tdeinit();
|
||||
|
||||
|
||||
} END_TEST
|
||||
|
||||
START_TEST(indirectAccessDirect) {
|
||||
|
||||
Tinit();
|
||||
|
||||
int page = pageAllocMultiple(1);
|
||||
|
||||
recordid rid = rallocMany(page, 1, sizeof(int), 500);
|
||||
|
||||
int page;
|
||||
int xid = Tbegin();
|
||||
recordid rid = rallocMany(xid, sizeof(int), 500);
|
||||
page = rid.page;
|
||||
/* Make sure that it didn't create any indirect pages. */
|
||||
|
||||
Page * p = loadPage(page);
|
||||
|
@ -180,7 +187,9 @@ START_TEST(indirectAccessDirect) {
|
|||
|
||||
releasePage(p);
|
||||
|
||||
int xid = Tbegin();
|
||||
Tcommit(xid);
|
||||
|
||||
xid = Tbegin();
|
||||
|
||||
for(int i = 0; i < 500; i++) {
|
||||
rid.slot = i;
|
||||
|
@ -205,10 +214,12 @@ START_TEST(indirectAccessIndirect) {
|
|||
|
||||
Tinit();
|
||||
|
||||
int page = pageAllocMultiple(1);
|
||||
int page;
|
||||
|
||||
recordid rid = rallocMany(page, 1, sizeof(int), 500000);
|
||||
int xid = Tbegin();
|
||||
|
||||
recordid rid = rallocMany(xid, sizeof(int), 500000);
|
||||
page = rid.page;
|
||||
/* Make sure that it didn't create any indirect pages. */
|
||||
|
||||
Page * p = loadPage(page);
|
||||
|
@ -218,11 +229,11 @@ START_TEST(indirectAccessIndirect) {
|
|||
assert(page_type == INDIRECT_PAGE);
|
||||
|
||||
fail_unless(page_type == INDIRECT_PAGE, NULL);
|
||||
|
||||
|
||||
Tcommit(xid);
|
||||
xid = Tbegin();
|
||||
releasePage(p);
|
||||
|
||||
int xid = Tbegin();
|
||||
|
||||
for(int i = 0; i < 500000; i++) {
|
||||
rid.slot = i;
|
||||
Tset(xid, dereferenceRID(rid), &i);
|
||||
|
@ -240,6 +251,31 @@ START_TEST(indirectAccessIndirect) {
|
|||
|
||||
Tcommit(xid);
|
||||
|
||||
Tdeinit();
|
||||
|
||||
} END_TEST
|
||||
|
||||
/** @test check that the indirectPageRecordCount() function works
|
||||
properly for both INDIRECT_PAGES and for SLOTTED_PAGES. */
|
||||
START_TEST(indirectSizeTest) {
|
||||
|
||||
Tinit();
|
||||
|
||||
int xid = Tbegin();
|
||||
|
||||
recordid rid = rallocMany(xid, sizeof(int), 20);
|
||||
int count = indirectPageRecordCount(rid);
|
||||
assert(count == 20);
|
||||
|
||||
recordid rid2 = rallocMany(xid, sizeof(int), 5000);
|
||||
|
||||
count = indirectPageRecordCount(rid2);
|
||||
assert(count == 5000);
|
||||
|
||||
Tcommit(xid);
|
||||
|
||||
Tdeinit();
|
||||
|
||||
} END_TEST
|
||||
|
||||
|
||||
|
@ -254,6 +290,7 @@ Suite * check_suite(void) {
|
|||
tcase_add_test(tc, indirectAlloc);
|
||||
tcase_add_test(tc, indirectAccessDirect);
|
||||
tcase_add_test(tc, indirectAccessIndirect);
|
||||
tcase_add_test(tc, indirectSizeTest);
|
||||
|
||||
/* --------------------------------------------- */
|
||||
|
||||
|
|
test/lladd/check_lladdhash.c (new file, 152 lines)
@@ -0,0 +1,152 @@
|
|||
|
||||
/*---
|
||||
This software is copyrighted by the Regents of the University of
|
||||
California, and other parties. The following terms apply to all files
|
||||
associated with the software unless explicitly disclaimed in
|
||||
individual files.
|
||||
|
||||
The authors hereby grant permission to use, copy, modify, distribute,
|
||||
and license this software and its documentation for any purpose,
|
||||
provided that existing copyright notices are retained in all copies
|
||||
and that this notice is included verbatim in any distributions. No
|
||||
written agreement, license, or royalty fee is required for any of the
|
||||
authorized uses. Modifications to this software may be copyrighted by
|
||||
their authors and need not follow the licensing terms described here,
|
||||
provided that the new terms are clearly indicated on the first page of
|
||||
each file where they apply.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
|
||||
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
|
||||
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
|
||||
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
|
||||
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
|
||||
NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, AND
|
||||
THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE
|
||||
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
|
||||
GOVERNMENT USE: If you are acquiring this software on behalf of the
|
||||
U.S. government, the Government shall have only "Restricted Rights" in
|
||||
the software and related documentation as defined in the Federal
|
||||
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you are
|
||||
acquiring the software on behalf of the Department of Defense, the
|
||||
software shall be classified as "Commercial Computer Software" and the
|
||||
Government shall have only "Restricted Rights" as defined in Clause
|
||||
252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
|
||||
authors grant the U.S. Government and others acting in its behalf
|
||||
permission to use and distribute the software in accordance with the
|
||||
terms specified in this license.
|
||||
---*/
|
||||
|
||||
#include <config.h>
|
||||
#include <check.h>
|
||||
#include "../check_includes.h"
|
||||
|
||||
#include <lladd/transactional.h>
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#define LOG_NAME "check_lladdhash.log"
|
||||
|
||||
/** @test
|
||||
executes each of the insert / remove / lookup operations a few times.
|
||||
*/
|
||||
#define NUM_BUCKETS 10
|
||||
#define NUM_ENTRIES 100
|
||||
|
||||
START_TEST(simpleHashTest)
|
||||
{
|
||||
Tinit();
|
||||
|
||||
int xid = Tbegin();
|
||||
|
||||
recordid hashRoot = lHtCreate(xid, NUM_BUCKETS);
|
||||
|
||||
lladdHash_t * hash = lHtOpen(xid, hashRoot);
|
||||
|
||||
for(int i = 0; i < NUM_ENTRIES; i++) {
|
||||
recordid rid;
|
||||
rid.page=i+1;
|
||||
rid.slot=i+1;
|
||||
rid.size=i+1;
|
||||
|
||||
assert(isNullRecord(lHtInsert(xid, hash, &i, sizeof(int), rid)));
|
||||
assert(!isNullRecord(lHtInsert(xid, hash, &i, sizeof(int), rid)));
|
||||
|
||||
}
|
||||
|
||||
for(int i = 0; i < NUM_ENTRIES; i+=10) {
|
||||
recordid rid = lHtRemove(xid, hash, &i, sizeof(int));
|
||||
assert(rid.page == (i+1));
|
||||
assert(rid.slot == (i+1));
|
||||
assert(rid.size == (i+1));
|
||||
assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
|
||||
}
|
||||
|
||||
for(int i = 0; i < NUM_ENTRIES; i++) {
|
||||
if(i % 10) {
|
||||
recordid rid = lHtLookup(xid, hash, &i, sizeof(int));
|
||||
assert(rid.page == (i+1));
|
||||
assert(rid.slot == (i+1));
|
||||
assert(rid.size == (i+1));
|
||||
} else {
|
||||
assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
|
||||
}
|
||||
}
|
||||
|
||||
Tcommit(xid);
|
||||
xid = Tbegin();
|
||||
for(int i = 0; i < NUM_ENTRIES; i++) {
|
||||
if(i % 10) {
|
||||
recordid rid = lHtRemove(xid, hash, &i, sizeof(int));
|
||||
assert(rid.page == (i+1));
|
||||
assert(rid.slot == (i+1));
|
||||
assert(rid.size == (i+1));
|
||||
} else {
|
||||
assert(isNullRecord(lHtRemove(xid, hash, &i, sizeof(int))));
|
||||
}
|
||||
}
|
||||
|
||||
for(int i = 0; i < NUM_ENTRIES; i++) {
|
||||
assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
|
||||
}
|
||||
|
||||
Tabort(xid);
|
||||
|
||||
for(int i = 0; i < NUM_ENTRIES; i++) {
|
||||
if(i % 10) {
|
||||
recordid rid = lHtLookup(xid, hash, &i, sizeof(int));
|
||||
assert(rid.page == (i+1));
|
||||
assert(rid.slot == (i+1));
|
||||
assert(rid.size == (i+1));
|
||||
} else {
|
||||
assert(isNullRecord(lHtLookup(xid, hash, &i, sizeof(int))));
|
||||
}
|
||||
}
|
||||
|
||||
Tdeinit();
|
||||
|
||||
}
|
||||
END_TEST
|
||||
|
||||
Suite * check_suite(void) {
|
||||
Suite *s = suite_create("lladdHash");
|
||||
/* Begin a new test */
|
||||
TCase *tc = tcase_create("simple");
|
||||
|
||||
/* Sub tests are added, one per line, here */
|
||||
|
||||
tcase_add_test(tc, simpleHashTest);
|
||||
|
||||
/* --------------------------------------------- */
|
||||
|
||||
tcase_add_checked_fixture(tc, setup, teardown);
|
||||
|
||||
suite_add_tcase(s, tc);
|
||||
return s;
|
||||
}
|
||||
|
||||
#include "../check_setup.h"
|