use pageid instead of int; Tupdate now takes a pageid instead of a recordid

This commit is contained in:
Sears Russell 2008-10-03 02:42:25 +00:00
parent c0f7336772
commit 6354fe8a2f
37 changed files with 371 additions and 403 deletions

View file

@ -104,7 +104,7 @@ int openInterpreter(FILE * in, FILE * out, recordid hash) {
TregionReadBoundaryTag(-1,pid,&tag);
int done = 0;
while(!done) {
fprintf(out, "\tpageid=%lld\ttype=%d\tsize=%d\n", pid, tag.allocation_manager, tag.size);
fprintf(out, "\tpageid=%lld\ttype=%d\tsize=%lld\n", pid, tag.allocation_manager, tag.size);
if(tag.size == UINT32_MAX) { fprintf(out, "\t[EOF]\n"); }
int err = TregionNextBoundaryTag(-1,&pid,&tag,0);
if(!err) { done = 1; }

View file

@ -81,7 +81,7 @@ static int cmpFreespace(const void * ap, const void * bp, const void * param) {
}
}
inline static availablePage* getAvailablePage(allocationPolicy * ap, int pageid) {
inline static availablePage* getAvailablePage(allocationPolicy * ap, pageid_t pageid) {
return (availablePage*) LH_ENTRY(find)(ap->allPages, &pageid, sizeof(pageid));
}
@ -145,7 +145,7 @@ inline static void lockAlloced(allocationPolicy * ap, int xid, availablePage
int * xidp = malloc(sizeof(int));
*xidp = xid;
LH_ENTRY(insert)(ap->pageOwners, &(p->pageid), sizeof(int), xidp);
LH_ENTRY(insert)(ap->pageOwners, &(p->pageid), sizeof(p->pageid), xidp);
insert_xidAlloced(ap, xid, p);
}
@ -170,7 +170,7 @@ inline static void lockDealloced(allocationPolicy * ap, int xid, availablePage *
// xid should own it
lockAlloced(ap, xid, p);
} else if(p->lockCount == 1) {
int * xidp = LH_ENTRY(find)(ap->pageOwners, &(p->pageid), sizeof(int));
int * xidp = LH_ENTRY(find)(ap->pageOwners, &(p->pageid), sizeof(p->pageid));
if(!xidp) {
// The only active transaction that touched this page deallocated from it,
@ -302,10 +302,10 @@ availablePage * allocationPolicyFindPage(allocationPolicy * ap, int xid, int fre
return (availablePage*) ret;
}
void allocationPolicyAllocedFromPage(allocationPolicy *ap, int xid, int pageid) {
availablePage * p = getAvailablePage(ap, pageid);
void allocationPolicyAllocedFromPage(allocationPolicy *ap, int xid, pageid_t page) {
availablePage * p = getAvailablePage(ap, page);
const availablePage * check1 = RB_ENTRY(find)(p, ap->availablePages);
int * xidp = LH_ENTRY(find)(ap->pageOwners, &(pageid), sizeof(pageid));
int * xidp = LH_ENTRY(find)(ap->pageOwners, &(page), sizeof(page));
if(!(xidp || check1)) {
// the page is not available, and is not owned.
// this can happen if more than one transaction deallocs from the same page
@ -324,9 +324,9 @@ void allocationPolicyAllocedFromPage(allocationPolicy *ap, int xid, int pageid)
}
}
void allocationPolicyLockPage(allocationPolicy *ap, int xid, int pageid) {
void allocationPolicyLockPage(allocationPolicy *ap, int xid, pageid_t page) {
availablePage * p = getAvailablePage(ap, pageid);
availablePage * p = getAvailablePage(ap, page);
lockDealloced(ap, xid, p);
}
@ -334,7 +334,7 @@ void allocationPolicyLockPage(allocationPolicy *ap, int xid, int pageid) {
void allocationPolicyTransactionCompleted(allocationPolicy * ap, int xid) {
struct RB_ENTRY(tree) * locks = LH_ENTRY(find)(ap->xidAlloced, &xid, sizeof(int));
struct RB_ENTRY(tree) * locks = LH_ENTRY(find)(ap->xidAlloced, &xid, sizeof(xid));
if(locks) {
@ -344,12 +344,12 @@ void allocationPolicyTransactionCompleted(allocationPolicy * ap, int xid) {
unlockAlloced(ap, xid, (next)); // This is really inefficient. (We're wasting hashtable lookups. Also, an iterator would be faster.)
}
LH_ENTRY(remove)(ap->xidAlloced, &xid, sizeof(int));
LH_ENTRY(remove)(ap->xidAlloced, &xid, sizeof(xid));
RB_ENTRY(destroy)(locks);
}
locks = LH_ENTRY(find)(ap->xidDealloced, &xid, sizeof(int));
locks = LH_ENTRY(find)(ap->xidDealloced, &xid, sizeof(xid));
if(locks) {
availablePage * next;
@ -358,7 +358,7 @@ void allocationPolicyTransactionCompleted(allocationPolicy * ap, int xid) {
unlockDealloced(ap, xid, (availablePage*)next); // This is really inefficient. (We're wasting hashtable lookups. Also, an iterator would be faster.)
}
LH_ENTRY(remove)(ap->xidDealloced, &xid, sizeof(int));
LH_ENTRY(remove)(ap->xidDealloced, &xid, sizeof(xid));
RB_ENTRY(destroy)(locks);
}
@ -374,7 +374,7 @@ void allocationPolicyUpdateFreespaceUnlockedPage(allocationPolicy * ap, availabl
}
void allocationPolicyUpdateFreespaceLockedPage(allocationPolicy * ap, int xid, availablePage * key, int newFree) {
struct RB_ENTRY(tree) * locks = LH_ENTRY(find)(ap->xidAlloced, &xid, sizeof(int));
struct RB_ENTRY(tree) * locks = LH_ENTRY(find)(ap->xidAlloced, &xid, sizeof(xid));
assert(key);
availablePage * p = (availablePage*) RB_ENTRY(delete)(key, locks);
assert(p); // sometimes fails

View file

@ -6,7 +6,7 @@
void allocBlob(int xid, recordid rid) {
assert(rid.size>0);
int pageCount = (rid.size / USABLE_SIZE_OF_PAGE) + ((rid.size % USABLE_SIZE_OF_PAGE) ? 1 : 0);
pageid_t pageCount = (rid.size / USABLE_SIZE_OF_PAGE) + ((rid.size % USABLE_SIZE_OF_PAGE) ? 1 : 0);
long startPage = TpageAllocMany(xid, pageCount);
blob_record_t rec;
rec.offset = startPage;
@ -19,7 +19,7 @@ void allocBlob(int xid, recordid rid) {
}
void readBlob(int xid, Page * p2, recordid rid, byte * buf) {
int chunk;
pageid_t chunk;
recordid rawRid = rid;
rawRid.size = BLOB_SLOT;
byte * pbuf = alloca(PAGE_SIZE);
@ -45,7 +45,7 @@ void writeBlob(int xid, Page * p, recordid rid, const void* dat) {
stasis_record_read(xid, p, r, (byte*)&rec);
assert(rec.offset);
int64_t chunk = 0;
pageid_t chunk = 0;
for(; (chunk+1) * USABLE_SIZE_OF_PAGE < rid.size; chunk++) {
Page * cnk = loadPage(xid, rec.offset+chunk);
writelock(cnk->rwlatch,0);

View file

@ -61,6 +61,7 @@ terms specified in this license.
#endif
#include <stasis/common.h>
#include <stasis/latches.h>
#include <assert.h>
@ -111,8 +112,8 @@ static Page * dummy_page;
static pthread_key_t lastPage;
static void bufManBufDeinit();
static compensated_function Page *bufManLoadPage(int xid, int pageid);
static compensated_function Page *bufManLoadUninitPage(int xid, int pageid);
static compensated_function Page *bufManLoadPage(int xid, pageid_t pageid);
static compensated_function Page *bufManLoadUninitPage(int xid, pageid_t pageid);
static void bufManReleasePage (Page * p);
static void bufManSimulateBufferManagerCrash();
@ -138,7 +139,7 @@ static int bufManBufInit() {
Page *first;
first = pageMalloc();
pageFree(first, 0);
LH_ENTRY(insert)(activePages, &first->id, sizeof(int), first);
LH_ENTRY(insert)(activePages, &first->id, sizeof(first->id), first);
pageRead(first);
pageCacheInit(first);
@ -206,12 +207,12 @@ static void bufManReleasePage (Page * p) {
}
static Page * getPage(int pageid, int locktype, int uninitialized) {
static Page * getPage(pageid_t pageid, int locktype, int uninitialized) {
Page * ret;
int spin = 0;
pthread_mutex_lock(&loadPagePtr_mutex);
ret = LH_ENTRY(find)(activePages, &pageid, sizeof(int));
ret = LH_ENTRY(find)(activePages, &pageid, sizeof(pageid));
if(ret) {
#ifdef PROFILE_LATCHES_WRITE_ONLY
@ -245,7 +246,7 @@ static Page * getPage(int pageid, int locktype, int uninitialized) {
pthread_mutex_unlock(&loadPagePtr_mutex);
sched_yield();
pthread_mutex_lock(&loadPagePtr_mutex);
ret = LH_ENTRY(find)(activePages, &pageid, sizeof(int));
ret = LH_ENTRY(find)(activePages, &pageid, sizeof(pageid));
if(ret) {
#ifdef PROFILE_LATCHES_WRITE_ONLY
@ -293,7 +294,7 @@ static Page * getPage(int pageid, int locktype, int uninitialized) {
and that will try to add an entry for pageid
c) the most recent version of this page has been
written to the OS's file cache. */
int oldid = -1;
pageid_t oldid = -1;
if( cache_state == FULL ) {
@ -341,7 +342,7 @@ static Page * getPage(int pageid, int locktype, int uninitialized) {
/* Inserting this into the cache before releasing the mutex
ensures that constraint (b) above holds. */
LH_ENTRY(insert)(activePages, &pageid, sizeof(int), ret);
LH_ENTRY(insert)(activePages, &pageid, sizeof(pageid), ret);
pthread_mutex_unlock(&loadPagePtr_mutex);
/* Could writelock(ret) go here? */
@ -367,7 +368,7 @@ static Page * getPage(int pageid, int locktype, int uninitialized) {
pthread_mutex_lock(&loadPagePtr_mutex);
LH_ENTRY(remove)(activePages, &(oldid), sizeof(int));
LH_ENTRY(remove)(activePages, &(oldid), sizeof(oldid));
/* @todo Put off putting this back into cache until we're done with
it. -- This could cause the cache to empty out if the ratio of
@ -412,7 +413,7 @@ static Page * getPage(int pageid, int locktype, int uninitialized) {
#ifdef PROFILE_LATCHES_WRITE_ONLY
compensated_function Page * __profile_loadPage(int xid, int pageid, char * file, int line) {
compensated_function Page * __profile_loadPage(int xid, pageid_t pageid, char * file, int line) {
Page * ret = loadPage(xid, pageid);
@ -474,7 +475,7 @@ compensated_function void __profile_releasePage(Page * p) {
#endif
static compensated_function Page *bufManLoadPage(int xid, int pageid) {
static compensated_function Page *bufManLoadPage(int xid, pageid_t pageid) {
Page * ret = pthread_getspecific(lastPage);
@ -505,7 +506,7 @@ static compensated_function Page *bufManLoadPage(int xid, int pageid) {
return ret;
}
static compensated_function Page *bufManLoadUninitPage(int xid, int pageid) {
static compensated_function Page *bufManLoadUninitPage(int xid, pageid_t pageid) {
Page * ret = pthread_getspecific(lastPage);
@ -536,8 +537,8 @@ static compensated_function Page *bufManLoadUninitPage(int xid, int pageid) {
return ret;
}
Page * (*loadPageImpl)(int xid, int pageid) = 0;
Page * (*loadUninitPageImpl)(int xid, int pageid) = 0;
Page * (*loadPageImpl)(int xid, pageid_t pageid) = 0;
Page * (*loadUninitPageImpl)(int xid, pageid_t pageid) = 0;
void (*releasePageImpl)(Page * p) = 0;
void (*writeBackPage)(Page * p) = 0;
void (*forcePages)() = 0;
@ -545,7 +546,7 @@ void (*forcePageRange)(pageid_t start, pageid_t stop) = 0;
void (*bufDeinit)() = 0;
void (*simulateBufferManagerCrash)() = 0;
Page * loadPage(int xid, int pageid) {
Page * loadPage(int xid, pageid_t pageid) {
try_ret(NULL) {
// This lock is released at Tcommit()
if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); }
@ -554,7 +555,7 @@ Page * loadPage(int xid, int pageid) {
return loadPageImpl(xid, pageid);
}
Page * loadUninitializedPage(int xid, int pageid) {
Page * loadUninitializedPage(int xid, pageid_t pageid) {
try_ret(NULL) {
// This lock is released at Tcommit()
if(globalLockManager.readLockPage) { globalLockManager.readLockPage(xid, pageid); }

View file

@ -23,10 +23,10 @@ static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t readComplete = PTHREAD_COND_INITIALIZER;
static pthread_cond_t needFree = PTHREAD_COND_INITIALIZER;
static int freeLowWater;
static int freeListLength;
static int freeCount;
static int pageCount;
static pageid_t freeLowWater;
static pageid_t freeListLength;
static pageid_t freeCount;
static pageid_t pageCount;
static Page ** freeList;
@ -53,7 +53,7 @@ static void pageSetNode(void * page, node_t * n, void * ignore) {
#ifdef LONG_RUN
inline static void checkPageState(Page * p) {
Page * check = LH_ENTRY(find)(cachedPages, &(p->id), sizeof(int));
Page * check = LH_ENTRY(find)(cachedPages, &(p->id), sizeof(p->id));
if(check) {
int pending = *pagePendingPtr(p);
int pinned = *pagePinCountPtr(p);
@ -63,7 +63,7 @@ inline static void checkPageState(Page * p) {
assert(!pageGetNode(p,0));
}
int notfound = 1;
for(int i = 0; i < freeCount; i++) {
for(pageid_t i = 0; i < freeCount; i++) {
if(freeList[i] == p) { notfound = 0; }
}
assert(notfound);
@ -72,7 +72,7 @@ inline static void checkPageState(Page * p) {
assert(!*pagePendingPtr(p));
assert(!*pagePinCountPtr(p));
int found = 0;
for(int i = 0; i < freeCount; i++) {
for(pageid_t i = 0; i < freeCount; i++) {
if(freeList[i] == p) { found = 1; }
}
assert(found);
@ -105,7 +105,7 @@ inline static Page * writeBackOnePage() {
checkPageState(victim);
lru->remove(lru, victim);
Page * old = LH_ENTRY(remove)(cachedPages, &(victim->id), sizeof(int));
Page * old = LH_ENTRY(remove)(cachedPages, &(victim->id), sizeof(victim->id));
assert(old == victim);
// printf("Write(%ld)\n", (long)victim->id);
@ -183,7 +183,7 @@ static void * writeBackWorker(void * ignored) {
return 0;
}
static Page * bhLoadPageImpl_helper(int xid, const int pageid, int uninitialized) {
static Page * bhLoadPageImpl_helper(int xid, const pageid_t pageid, int uninitialized) {
// Note: Calls to loadlatch in this function violate lock order, but
// should be safe, since we make sure no one can have a writelock
@ -194,7 +194,7 @@ static Page * bhLoadPageImpl_helper(int xid, const int pageid, int uninitialized
pthread_mutex_lock(&mut);
// Is the page in cache?
Page * ret = LH_ENTRY(find)(cachedPages, &pageid,sizeof(int));
Page * ret = LH_ENTRY(find)(cachedPages, &pageid,sizeof(pageid));
do {
@ -205,7 +205,7 @@ static Page * bhLoadPageImpl_helper(int xid, const int pageid, int uninitialized
if(*pagePendingPtr(ret)) {
pthread_cond_wait(&readComplete, &mut);
if(ret->id != pageid) {
ret = LH_ENTRY(find)(cachedPages, &pageid, sizeof(int));
ret = LH_ENTRY(find)(cachedPages, &pageid, sizeof(pageid));
}
} else {
#ifdef LATCH_SANITY_CHECKING
@ -231,7 +231,7 @@ static Page * bhLoadPageImpl_helper(int xid, const int pageid, int uninitialized
Page * ret2 = getFreePage();
// Did some other thread put the page in cache for us?
ret = LH_ENTRY(find)(cachedPages, &pageid,sizeof(int));
ret = LH_ENTRY(find)(cachedPages, &pageid,sizeof(pageid));
if(!ret) {
// No, so we're ready to add it.
@ -257,7 +257,7 @@ static Page * bhLoadPageImpl_helper(int xid, const int pageid, int uninitialized
// Add a pending entry to cachedPages to block like-minded threads and writeback
(*pagePendingPtr(ret)) = (void*)1;
check = LH_ENTRY(insert)(cachedPages,&pageid,sizeof(int), ret);
check = LH_ENTRY(insert)(cachedPages,&pageid,sizeof(pageid), ret);
assert(!check);
ret->id = pageid;
@ -299,10 +299,10 @@ static Page * bhLoadPageImpl_helper(int xid, const int pageid, int uninitialized
return ret;
}
static Page * bhLoadPageImpl(int xid, const int pageid) {
static Page * bhLoadPageImpl(int xid, const pageid_t pageid) {
return bhLoadPageImpl_helper(xid,pageid,0);
}
static Page * bhLoadUninitPageImpl(int xid, const int pageid) {
static Page * bhLoadUninitPageImpl(int xid, const pageid_t pageid) {
return bhLoadPageImpl_helper(xid,pageid,1); // 1 means dont care about preimage of page.
}

View file

@ -9,15 +9,15 @@
#include <stasis/page.h>
static Page ** pageMap;
static int pageCount;
static pageid_t pageCount;
static pthread_mutex_t pageArray_mut = PTHREAD_MUTEX_INITIALIZER;
static Page * paLoadPage(int xid, int pageid) {
static Page * paLoadPage(int xid, pageid_t pageid) {
pthread_mutex_lock(&pageArray_mut);
if(pageid >= pageCount) {
pageMap = realloc(pageMap, (1+pageid) * sizeof(Page*));
for(int i = pageCount; i <= pageid; i++) {
for(pageid_t i = pageCount; i <= pageid; i++) {
pageMap[i] = 0;
}
pageCount = pageid + 1;
@ -48,7 +48,7 @@ static void paWriteBackPage(Page * p) { /* no-op */ }
static void paForcePages() { /* no-op */ }
static void paBufDeinit() {
for(int i =0; i < pageCount; i++) {
for(pageid_t i =0; i < pageCount; i++) {
if(pageMap[i]) {
deletelock(pageMap[i]->rwlatch);
deletelock(pageMap[i]->loadlatch);

View file

@ -58,7 +58,7 @@ terms specified in this license.
#include <stasis/truncation.h>
#include <stasis/page.h>
/* TODO: Combine with buffer size... */
static int nextPage = 0;
static pageid_t nextPage = 0;
static pthread_mutex_t pageMallocMutex;
static void * addressFromMalloc = 0;
@ -75,20 +75,14 @@ void bufferPoolInit() {
byte * bufferSpace ;
/*#ifdef HAVE_POSIX_MEMALIGN
int ret = posix_memalign((void*)&bufferSpace, PAGE_SIZE, PAGE_SIZE * (MAX_BUFFER_SIZE + 1));
assert(!ret);
addressFromMalloc = bufferSpace;
#else*/
bufferSpace = calloc((MAX_BUFFER_SIZE + 2), PAGE_SIZE);
assert(bufferSpace);
addressFromMalloc = bufferSpace;
bufferSpace = (byte*)(((long)bufferSpace) +
PAGE_SIZE -
(((long)bufferSpace) % PAGE_SIZE));
//#endif
for(int i = 0; i < MAX_BUFFER_SIZE+1; i++) {
for(pageid_t i = 0; i < MAX_BUFFER_SIZE+1; i++) {
pool[i].rwlatch = initlock();
pool[i].loadlatch = initlock();
pool[i].memAddr = &(bufferSpace[i*PAGE_SIZE]);
@ -97,7 +91,7 @@ void bufferPoolInit() {
}
void bufferPoolDeInit() {
for(int i = 0; i < MAX_BUFFER_SIZE+1; i++) {
for(pageid_t i = 0; i < MAX_BUFFER_SIZE+1; i++) {
deletelock(pool[i].rwlatch);
deletelock(pool[i].loadlatch);
}
@ -123,13 +117,13 @@ Page* pageMalloc() {
}
static void pageFreeNoLock(Page *p, int id) {
static void pageFreeNoLock(Page *p, pageid_t id) {
p->id = id;
p->LSN = 0;
p->dirty = 0;
}
void pageFree(Page *p, int id) {
void pageFree(Page *p, pageid_t id) {
writelock(p->rwlatch, 10);
pageFreeNoLock(p,id);
writeunlock(p->rwlatch);

View file

@ -35,20 +35,20 @@ static unsigned long thomasWangs64BitMixFunction(unsigned long key)
#endif
#endif
unsigned int max_bucket(unsigned char tableBits, unsigned int nextExtension) {
unsigned int oldTableLength = twoToThe(tableBits - 1);
uint64_t max_bucket(unsigned char tableBits, uint64_t nextExtension) {
uint64_t oldTableLength = twoToThe(tableBits - 1);
return oldTableLength + nextExtension - 1;
}
void hashGetParamsForSize(unsigned int desiredSize,
unsigned char * tableBits, unsigned int* nextExtension) {
void hashGetParamsForSize(uint64_t desiredSize,
unsigned char * tableBits, uint64_t* nextExtension) {
*tableBits = logBase2(desiredSize)+1;
*nextExtension = ((desiredSize) - twoToThe(*tableBits-1));
}
unsigned int hash(const void * val, long val_length,
unsigned char tableBits, unsigned int nextExtension) {
uint64_t hash(const void * val, uint64_t val_length,
unsigned char tableBits, uint64_t nextExtension) {
// Calculate the hash value as it was before this round of splitting.
unsigned int oldTableLength = twoToThe(tableBits - 1);
unsigned int unmixed = stasis_crc32(val, val_length, (unsigned int)-1);
@ -95,9 +95,9 @@ static const char LogTable256[] =
harness to compare logBase2Slow's output with logBase2's output,
etc...)
*/
unsigned int logBase2(unsigned int v) {
unsigned int r = 0; // r will be lg(v)
register unsigned int t, tt; // temporaries
uint32_t logBase2(uint64_t v) {
uint32_t r = 0; // r will be lg(v)
uint32_t t, tt; // temporaries
if ((tt = v >> 16))
{
@ -110,8 +110,8 @@ unsigned int logBase2(unsigned int v) {
return r;
}
unsigned long logBase2Slow(unsigned long v) {
unsigned long r = 0; // r will be lg(v)
uint32_t logBase2Slow(uint64_t v) {
uint32_t r = 0; // r will be lg(v)
while (v >>= 1) // unroll for more speed...
{

View file

@ -39,8 +39,8 @@ struct LH_ENTRY(table) {
struct LH_ENTRY(pair_t) * bucketList;
unsigned int bucketListLength;
unsigned char bucketListBits;
unsigned int bucketListNextExtension;
unsigned int occupancy;
uint64_t bucketListNextExtension;
uint64_t occupancy;
#ifdef NAIVE_LOCKING
pthread_mutex_t lock;
#endif

View file

@ -309,17 +309,17 @@ int lockManagerCommitRecords(int xid) {
return lockManagerCommitHashed(xid, sizeof(recordid));
}
compensated_function int lockManagerReadLockPage(int xid, int p) {
return lockManagerReadLockHashed(xid, (byte*)&p, sizeof(int));
compensated_function int lockManagerReadLockPage(int xid, pageid_t p) {
return lockManagerReadLockHashed(xid, (byte*)&p, sizeof(p));
}
compensated_function int lockManagerWriteLockPage(int xid, int p) {
return lockManagerWriteLockHashed(xid, (byte*)&p, sizeof(int));
compensated_function int lockManagerWriteLockPage(int xid, pageid_t p) {
return lockManagerWriteLockHashed(xid, (byte*)&p, sizeof(p));
}
int lockManagerUnlockPage(int xid, int p) {
return lockManagerUnlockHashed(xid, (byte*)&p, sizeof(int));
int lockManagerUnlockPage(int xid, pageid_t p) {
return lockManagerUnlockHashed(xid, (byte*)&p, sizeof(p));
}
int lockManagerCommitPages(int xid) {
return lockManagerCommitHashed(xid, sizeof(int));
return lockManagerCommitHashed(xid, sizeof(pageid_t));
}
LockManagerSetup globalLockManager;

View file

@ -40,7 +40,7 @@
can be optimized by exploiting physical locality. A call such as
this allows page-level locality to be established / maintained:
int page = Treserve(int xid, int size)
pageid_t page = Treserve(int xid, int size)
This would tell Talloc to treat the page as though 'size' bytes had
already been reserved. The 'free space' that Talloc () reasons
@ -95,7 +95,7 @@ typedef struct {
int64_t size;
} alloc_arg;
static int op_alloc(const LogEntry* e, Page* p) { //(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
static int op_alloc(const LogEntry* e, Page* p) {
assert(e->update.arg_size >= sizeof(alloc_arg));
const alloc_arg* arg = (const alloc_arg*)getUpdateArgs(e);
@ -118,7 +118,7 @@ static int op_alloc(const LogEntry* e, Page* p) { //(int xid, Page * p, lsn_t ls
return ret;
}
static int op_dealloc(const LogEntry* e, Page* p) { //deoperate(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
static int op_dealloc(const LogEntry* e, Page* p) {
assert(e->update.arg_size >= sizeof(alloc_arg));
const alloc_arg* arg = (const alloc_arg*)getUpdateArgs(e);
recordid rid = {
@ -134,7 +134,7 @@ static int op_dealloc(const LogEntry* e, Page* p) { //deoperate(int xid, Page *
return 0;
}
static int op_realloc(const LogEntry* e, Page* p) { //reoperate(int xid, Page *p, lsn_t lsn, recordid rid, const void * dat) {
static int op_realloc(const LogEntry* e, Page* p) {
assert(e->update.arg_size >= sizeof(alloc_arg));
const alloc_arg* arg = (const alloc_arg*)getUpdateArgs(e);
@ -186,11 +186,11 @@ Operation getRealloc() {
return o;
}
static uint64_t lastFreepage;
static pageid_t lastFreepage;
static allocationPolicy * allocPolicy;
static void registerOldRegions();
void TallocInit() {
lastFreepage = UINT64_MAX;
lastFreepage = PAGEID_T_MAX;
allocPolicy = allocationPolicyInit();
}
void TallocPostInit() {
@ -237,12 +237,12 @@ static void registerOldRegions() {
static void reserveNewRegion(int xid) {
void* nta = TbeginNestedTopAction(xid, OPERATION_NOOP, 0,0);
int firstPage = TregionAlloc(xid, TALLOC_REGION_SIZE, STORAGE_MANAGER_TALLOC);
pageid_t firstPage = TregionAlloc(xid, TALLOC_REGION_SIZE, STORAGE_MANAGER_TALLOC);
int initialFreespace = -1;
availablePage ** newPages = malloc(sizeof(availablePage*)*(TALLOC_REGION_SIZE+1));
for(int i = 0; i < TALLOC_REGION_SIZE; i++) {
for(pageid_t i = 0; i < TALLOC_REGION_SIZE; i++) {
availablePage * next = malloc(sizeof(availablePage)); // * TALLOC_REGION_SIZE);
TinitializeSlottedPage(xid, firstPage + i);
@ -331,7 +331,7 @@ compensated_function recordid Talloc(int xid, unsigned long size) {
alloc_arg a = { rid.slot, rid.size };
Tupdate(xid, rid, &a, sizeof(a), OPERATION_ALLOC);
Tupdate(xid, rid.page, &a, sizeof(a), OPERATION_ALLOC);
if(type == BLOB_SLOT) {
rid.size = size;
@ -356,7 +356,7 @@ void allocTransactionCommit(int xid) {
} compensate;
}
compensated_function recordid TallocFromPage(int xid, long page, unsigned long size) {
compensated_function recordid TallocFromPage(int xid, pageid_t page, unsigned long size) {
short type;
if(size >= BLOB_THRESHOLD_SIZE) {
type = BLOB_SLOT;
@ -376,7 +376,7 @@ compensated_function recordid TallocFromPage(int xid, long page, unsigned long s
alloc_arg a = { rid.slot, rid.size };
Tupdate(xid, rid, &a, sizeof(a), OPERATION_ALLOC);
Tupdate(xid, rid.page, &a, sizeof(a), OPERATION_ALLOC);
if(type == BLOB_SLOT) {
rid.size = size;
@ -421,7 +421,7 @@ compensated_function void Tdealloc(int xid, recordid rid) {
unlock(p->rwlatch);
/** @todo race in Tdealloc; do we care, or is this something that the log manager should cope with? */
Tupdate(xid, rid, preimage, sizeof(alloc_arg)+rid.size, OPERATION_DEALLOC);
Tupdate(xid, rid.page, preimage, sizeof(alloc_arg)+rid.size, OPERATION_DEALLOC);
} compensate;
pthread_mutex_unlock(&talloc_mutex);
@ -451,18 +451,16 @@ compensated_function int TrecordSize(int xid, recordid rid) {
return ret;
}
void TinitializeSlottedPage(int xid, int pageid) {
void TinitializeSlottedPage(int xid, pageid_t page) {
alloc_arg a = { SLOTTED_PAGE, 0 };
recordid rid = { pageid, 0, 0 };
Tupdate(xid, rid, &a, sizeof(a), OPERATION_INITIALIZE_PAGE);
Tupdate(xid, page, &a, sizeof(a), OPERATION_INITIALIZE_PAGE);
}
void TinitializeFixedPage(int xid, int pageid, int slotLength) {
void TinitializeFixedPage(int xid, pageid_t page, int slotLength) {
alloc_arg a = { FIXED_PAGE, slotLength };
recordid rid = { pageid, 0, 0 };
Tupdate(xid, rid, &a, sizeof(a), OPERATION_INITIALIZE_PAGE);
Tupdate(xid, page, &a, sizeof(a), OPERATION_INITIALIZE_PAGE);
}
static int op_initialize_page(const LogEntry* e, Page* p) { //int xid, Page *p, lsn_t lsn, recordid rid, const void * dat) {
static int op_initialize_page(const LogEntry* e, Page* p) {
assert(e->update.arg_size == sizeof(alloc_arg));
const alloc_arg* arg = (const alloc_arg*)getUpdateArgs(e);

View file

@ -13,25 +13,25 @@
#include <math.h>
typedef struct {
int firstPage;
int initialSize;
int multiplier;
int size;
int maxOffset;
pageid_t firstPage;
pageid_t initialSize;
pageid_t multiplier;//XXX rest are not page numbers or offsets, but must all be same length
int size; // *has* to be an int; passed into OPERATION_FIXED_PAGE_ALLOC
pageid_t maxOffset;
} TarrayListParameters;
static TarrayListParameters pageToTLP(int xid, Page * p);
static int getBlockContainingOffset(TarrayListParameters tlp, int offset, int * firstSlotInBlock);
static int getBlockContainingOffset(TarrayListParameters tlp, int offset, pageid_t * firstSlotInBlock);
#define MAX_OFFSET_POSITION 3
#define FIRST_DATA_PAGE_OFFSET 4
/*----------------------------------------------------------------------------*/
compensated_function recordid TarrayListAlloc(int xid, int count, int multiplier, int size) {
compensated_function recordid TarrayListAlloc(int xid, pageid_t count, int multiplier, int size) {
int firstPage;
pageid_t firstPage;
try_ret(NULLRID) {
firstPage = TpageAllocMany(xid, count+1);
} end_ret(NULLRID);
@ -49,7 +49,7 @@ compensated_function recordid TarrayListAlloc(int xid, int count, int multiplier
rid.size = size;
rid.slot = 0;
try_ret(NULLRID) {
Tupdate(xid, rid, &tlp, sizeof(tlp), OPERATION_ARRAY_LIST_ALLOC);
Tupdate(xid, firstPage, &tlp, sizeof(tlp), OPERATION_ARRAY_LIST_ALLOC);
} end_ret(NULLRID);
return rid;
@ -62,16 +62,16 @@ static int op_array_list_alloc(const LogEntry* e, Page* p) {
const TarrayListParameters * tlp = (const TarrayListParameters*)getUpdateArgs(e);
int firstPage = tlp->firstPage;
int count = tlp->initialSize;
int multiplier = tlp->multiplier;
pageid_t firstPage = tlp->firstPage;
pageid_t count = tlp->initialSize;
pageid_t multiplier = tlp->multiplier;
int size = tlp->size;
stasis_fixed_initialize_page(p, sizeof(int), stasis_fixed_records_per_page(sizeof(int)));
stasis_fixed_initialize_page(p, sizeof(pageid_t), stasis_fixed_records_per_page(sizeof(pageid_t)));
recordid countRid, multiplierRid, slotSizeRid, maxOffset, firstDataPageRid;
countRid.page = multiplierRid.page = slotSizeRid.page = maxOffset.page = firstDataPageRid.page = p->id;
countRid.size = multiplierRid.size = slotSizeRid.size = maxOffset.size = firstDataPageRid.size = sizeof(int);
countRid.size = multiplierRid.size = slotSizeRid.size = maxOffset.size = firstDataPageRid.size = sizeof(pageid_t);
countRid.slot = 0;
multiplierRid.slot = 1;
@ -79,12 +79,12 @@ static int op_array_list_alloc(const LogEntry* e, Page* p) {
maxOffset.slot = 3;
firstDataPageRid.slot = 4;
int firstDataPage = firstPage + 1;
(*(int*)stasis_record_write_begin(e->xid, p, countRid))= count;
(*(int*)stasis_record_write_begin(e->xid, p, multiplierRid))= multiplier;
(*(int*)stasis_record_write_begin(e->xid, p, firstDataPageRid))= firstDataPage;
(*(int*)stasis_record_write_begin(e->xid, p, slotSizeRid))= size;
(*(int*)stasis_record_write_begin(e->xid, p, maxOffset))= -1;
pageid_t firstDataPage = firstPage + 1;
(*(pageid_t*)stasis_record_write_begin(e->xid, p, countRid))= count;
(*(pageid_t*)stasis_record_write_begin(e->xid, p, multiplierRid))= multiplier;
(*(pageid_t*)stasis_record_write_begin(e->xid, p, firstDataPageRid))= firstDataPage;
(*(pageid_t*)stasis_record_write_begin(e->xid, p, slotSizeRid))= size;
(*(pageid_t*)stasis_record_write_begin(e->xid, p, maxOffset))= -1;
*stasis_page_type_ptr(p) = ARRAY_LIST_PAGE;
@ -124,7 +124,7 @@ compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
releasePage(p);
p = NULL;
int lastCurrentBlock;
int lastCurrentBlock; // just a slot on a page
if(tlp.maxOffset == -1) {
lastCurrentBlock = -1;
} else{
@ -136,29 +136,25 @@ compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
recordid tmp; /* recordid of slot in base page that holds new block. */
tmp.page = rid.page;
tmp.size = sizeof(int);
tmp.size = sizeof(pageid_t);
recordid tmp2; /* recordid of newly created pages. */
tmp2.slot = 0;
tmp2.size = tlp.size;
/* Iterate over the (small number) of indirection blocks that need to be updated */
try_ret(compensation_error()) {
for(int i = lastCurrentBlock+1; i <= lastNewBlock; i++) {
for(pageid_t i = lastCurrentBlock+1; i <= lastNewBlock; i++) {
/* Alloc block i */
int blockSize = tlp.initialSize * pow(tlp.multiplier, i);
int newFirstPage = TpageAllocMany(xid, blockSize);
pageid_t blockSize = tlp.initialSize * pow(tlp.multiplier, i);
pageid_t newFirstPage = TpageAllocMany(xid, blockSize);
DEBUG("block %d\n", i);
tmp.slot = i + FIRST_DATA_PAGE_OFFSET;
/* Iterate over the (large number) of new blocks, clearing their contents */
/* @todo XXX arraylist generates N log entries initing pages.
It should generate 1 entry. (Need better LSN handling first.)*/
{
recordid newpage;
newpage.slot = 0;
newpage.size = 0;
for(int i = newFirstPage; i < newFirstPage + blockSize; i++) {
newpage.page = i;
TupdateRaw(xid, newpage, &tlp.size, sizeof(tlp.size), OPERATION_FIXED_PAGE_ALLOC);
for(pageid_t i = newFirstPage; i < newFirstPage + blockSize; i++) {
Tupdate(xid, i, &tlp.size, sizeof(tlp.size), OPERATION_FIXED_PAGE_ALLOC);
}
}
TsetRaw(xid,tmp,&newFirstPage);
@ -168,7 +164,7 @@ compensated_function int TarrayListExtend(int xid, recordid rid, int slots) {
tmp.slot = MAX_OFFSET_POSITION;
int newMaxOffset = tlp.maxOffset+slots;
pageid_t newMaxOffset = tlp.maxOffset+slots;
TsetRaw(xid, tmp, &newMaxOffset);
} end_ret(compensation_error());
return 0;
@ -191,7 +187,7 @@ recordid dereferenceArrayListRid(int xid, Page * p, int offset) {
TarrayListParameters tlp = pageToTLP(xid, p);
int rec_per_page = stasis_fixed_records_per_page((size_t)tlp.size);
int lastHigh = 0;
pageid_t lastHigh = 0;
int pageRidSlot = 0; /* The slot on the root arrayList page that contains the first page of the block of interest */
assert(tlp.maxOffset >= offset);
@ -199,12 +195,12 @@ recordid dereferenceArrayListRid(int xid, Page * p, int offset) {
pageRidSlot = getBlockContainingOffset(tlp, offset, &lastHigh);
int dataSlot = offset - lastHigh; /* The offset in the block of interest of the slot we want. */
int blockPage = dataSlot / rec_per_page; /* The page in the block of interest that contains the slot we want */
pageid_t blockPage = dataSlot / rec_per_page; /* The page in the block of interest that contains the slot we want */
int blockSlot = dataSlot - blockPage * rec_per_page;
int thePage;
pageid_t thePage;
recordid rid = { p->id, pageRidSlot + FIRST_DATA_PAGE_OFFSET, sizeof(int) };
recordid rid = { p->id, pageRidSlot + FIRST_DATA_PAGE_OFFSET, sizeof(pageid_t) };
thePage = *(int*)stasis_record_read_begin(xid,p,rid);
unlock(p->rwlatch);
@ -215,14 +211,14 @@ recordid dereferenceArrayListRid(int xid, Page * p, int offset) {
return rid;
}
static int getBlockContainingOffset(TarrayListParameters tlp, int offset, int * firstSlotInBlock) {
static int getBlockContainingOffset(TarrayListParameters tlp, int offset, pageid_t * firstSlotInBlock) {
int rec_per_page = stasis_fixed_records_per_page((size_t)tlp.size);
long thisHigh = rec_per_page * tlp.initialSize;
int lastHigh = 0;
int pageRidSlot = 0;
int currentPageLength = tlp.initialSize;
while(((long)offset) >= thisHigh) {
while(((pageid_t)offset) >= thisHigh) {
pageRidSlot ++;
lastHigh = thisHigh;
currentPageLength *= tlp.multiplier;
@ -239,14 +235,14 @@ static TarrayListParameters pageToTLP(int xid, Page * p) {
TarrayListParameters tlp;
tlp.firstPage = p->id;
/* tlp.maxOffset = *(int*)fixed_record_ptr(p, 3); */
recordid rid = { p->id, 0, sizeof(int) };
tlp.initialSize = *(int*)stasis_record_read_begin(xid, p, rid);
recordid rid = { p->id, 0, sizeof(pageid_t) };
tlp.initialSize = *(pageid_t*)stasis_record_read_begin(xid, p, rid);
rid.slot = 1;
tlp.multiplier = *(int*)stasis_record_read_begin(xid, p, rid);
tlp.multiplier = *(pageid_t*)stasis_record_read_begin(xid, p, rid);
rid.slot = 2;
tlp.size = *(int*)stasis_record_read_begin(xid, p, rid);
tlp.size = *(pageid_t*)stasis_record_read_begin(xid, p, rid);
rid.slot = 3;
tlp.maxOffset = *(int*)stasis_record_read_begin(xid, p, rid);
tlp.maxOffset = *(pageid_t*)stasis_record_read_begin(xid, p, rid);
return tlp;
}

View file

@ -24,7 +24,7 @@ typedef struct {
recordid buckets;
int keySize;
int valueSize;
long nextSplit;
pageid_t nextSplit;
int bits;
long numEntries;
} lladd_hash_header;

View file

@ -37,32 +37,32 @@ pblHashTable_t * lockedBuckets = NULL;
pthread_mutex_t linearHashMutex;
pthread_cond_t bucketUnlocked;
void lockBucket(int bucket) {
while(pblHtLookup(lockedBuckets, &bucket, sizeof(int))) {
void lockBucket(pageid_t bucket) {
while(pblHtLookup(lockedBuckets, &bucket, sizeof(bucket))) {
pthread_cond_wait(&bucketUnlocked, &linearHashMutex);
}
pblHtInsert(lockedBuckets, &bucket, sizeof(int), (void*)1);
pblHtInsert(lockedBuckets, &bucket, sizeof(bucket), (void*)1);
}
int lockBucketForKey(const byte * key, int keySize, recordid * headerRidB) {
int bucket = hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
pageid_t bucket = hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
while(pblHtLookup(lockedBuckets, &bucket, sizeof(int))) {
while(pblHtLookup(lockedBuckets, &bucket, sizeof(bucket))) {
pthread_cond_wait(&bucketUnlocked, &linearHashMutex);
bucket = hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
}
pblHtInsert(lockedBuckets, &bucket, sizeof(int), (void *) 1 );
pblHtInsert(lockedBuckets, &bucket, sizeof(bucket), (void *) 1 );
return bucket;
}
void unlockBucket(int bucket) {
pblHtRemove(lockedBuckets, &bucket, sizeof(int));
void unlockBucket(pageid_t bucket) {
pblHtRemove(lockedBuckets, &bucket, sizeof(bucket));
pthread_cond_broadcast(&bucketUnlocked);
}
void rehash(int xid, recordid hash, unsigned int next_split, unsigned int i, unsigned int keySize, unsigned int valSize);
void update_hash_header(int xid, recordid hash, int i, int next_split);
void rehash(int xid, recordid hash, pageid_t next_split, pageid_t i, unsigned int keySize, unsigned int valSize);
void update_hash_header(int xid, recordid hash, pageid_t i, pageid_t next_split);
int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry * bucket_contents,
void * key, int keySize, int valSize, recordid * deletedEntry);
void insertIntoBucket(int xid, recordid hashRid, int bucket_number, hashEntry * bucket_contents,
@ -109,7 +109,7 @@ void expand(int xid, recordid hash, int next_split, int i, int keySize, int valS
#define FF_AM 750
if(count <= 0 && !(count * -1) % FF_AM) {
try {
recordid * headerRidB = pblHtLookup(openHashes, &(hash.page), sizeof(int));
recordid * headerRidB = pblHtLookup(openHashes, &(hash.page), sizeof(hash.page));
int j;
TarrayListExtend(xid, hash, AMORTIZE);
for(j = 0; j < AMORTIZE; j++) {
@ -128,9 +128,9 @@ void expand(int xid, recordid hash, int next_split, int i, int keySize, int valS
}
}
void update_hash_header(int xid, recordid hash, int i, int next_split) {
void update_hash_header(int xid, recordid hash, pageid_t i, pageid_t next_split) {
try {
hashEntry * he = pblHtLookup(openHashes, &(hash.page), sizeof(int));
hashEntry * he = pblHtLookup(openHashes, &(hash.page), sizeof(hash.page));
assert(he);
recordid * headerRidB = &he->next;
@ -144,7 +144,7 @@ void update_hash_header(int xid, recordid hash, int i, int next_split) {
} end;
}
void rehash(int xid, recordid hashRid, unsigned int next_split, unsigned int i, unsigned int keySize, unsigned int valSize) {
void rehash(int xid, recordid hashRid, pageid_t next_split, pageid_t i, unsigned int keySize, unsigned int valSize) {
try {
int firstA = 1; // Is 'A' the recordid of a bucket?
int firstD = 1; // What about 'D'?
@ -408,7 +408,7 @@ recordid ThashAlloc(int xid, int keySize, int valSize) {
assert(headerRidB);
pblHtInsert(openHashes, &(rid.page), sizeof(int), headerRidB);
pblHtInsert(openHashes, &(rid.page), sizeof(rid.page), headerRidB);
assert(headerRidB);
Page * p = loadPage(xid, rid.page);
@ -456,7 +456,7 @@ void TnaiveHashInsert(int xid, recordid hashRid,
void * key, int keySize,
void * val, int valSize) {
recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(int));
recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
int bucket = hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
@ -479,7 +479,7 @@ void TnaiveHashInsert(int xid, recordid hashRid,
so that expand can be selectively called. */
int TnaiveHashDelete(int xid, recordid hashRid,
void * key, int keySize, int valSize) {
recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(int));
recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
int bucket_number = hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
recordid deleteMe;
@ -505,7 +505,7 @@ int ThashOpen(int xid, recordid hashRid, int keySize, int valSize) {
hashRid.slot = 1;
Tread(xid, hashRid, headerRidB);
pblHtInsert(openHashes, &(hashRid.page), sizeof(int), headerRidB);
pblHtInsert(openHashes, &(hashRid.page), sizeof(hashRid.page), headerRidB);
return 0;
}
@ -518,15 +518,15 @@ void TnaiveHashUpdate(int xid, recordid hashRid, void * key, int keySize, void *
int ThashClose(int xid, recordid hashRid) {
recordid * freeMe = pblHtLookup(openHashes, &(hashRid.page), sizeof(int));
pblHtRemove(openHashes, &(hashRid.page), sizeof(int));
recordid * freeMe = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
pblHtRemove(openHashes, &(hashRid.page), sizeof(hashRid.page));
free(freeMe);
return 0;
}
int TnaiveHashLookup(int xid, recordid hashRid, void * key, int keySize, void * buf, int valSize) {
recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(int));
recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
int bucket_number = hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
int ret = findInBucket(xid, hashRid, bucket_number, key, keySize, buf, valSize);
return ret;

View file

@ -34,10 +34,10 @@ static int op_page_set_range_inverse(const LogEntry* e, Page* p) {
return 0;
}
compensated_function int TpageGet(int xid, int pageid, void *memAddr) {
compensated_function int TpageGet(int xid, pageid_t page, void *memAddr) {
Page * q = 0;
try_ret(compensation_error()) {
q = loadPage(xid, pageid);
q = loadPage(xid, page);
memcpy(memAddr, q->memAddr, PAGE_SIZE);
} end_ret(compensation_error());
try_ret(compensation_error()) {
@ -46,18 +46,14 @@ compensated_function int TpageGet(int xid, int pageid, void *memAddr) {
return 0;
}
compensated_function int TpageSet(int xid, int pageid, const void * memAddr) {
return TpageSetRange(xid, pageid, 0, memAddr, PAGE_SIZE);
compensated_function int TpageSet(int xid, pageid_t page, const void * memAddr) {
return TpageSetRange(xid, page, 0, memAddr, PAGE_SIZE);
}
int TpageSetRange(int xid, int pageid, int offset, const void * memAddr, int len) {
int TpageSetRange(int xid, pageid_t page, int offset, const void * memAddr, int len) {
// XXX need to pack offset into front of log entry
recordid rid;
rid.page = pageid;
rid.slot = 0;
rid.size = 0;
Page * p = loadPage(xid, rid.page);
Page * p = loadPage(xid, page);
byte * logArg = malloc(sizeof(int) + 2 * len);
*(int*)logArg = offset;
@ -65,7 +61,7 @@ int TpageSetRange(int xid, int pageid, int offset, const void * memAddr, int len
memcpy(logArg+sizeof(int)+len, p->memAddr+offset, len);
try_ret(compensation_error()) {
Tupdate(xid,rid,logArg,sizeof(int)+len*2,OPERATION_PAGE_SET_RANGE);
Tupdate(xid,page,logArg,sizeof(int)+len*2,OPERATION_PAGE_SET_RANGE);
} end_ret(compensation_error());
free(logArg);
@ -93,12 +89,12 @@ compensated_function void pageOperationsInit() {
}
compensated_function int TpageDealloc(int xid, int pageid) {
TregionDealloc(xid, pageid); // @todo inefficient hack!
compensated_function int TpageDealloc(int xid, pageid_t page) {
TregionDealloc(xid, page); // @todo inefficient hack!
return 0;
}
compensated_function int TpageAlloc(int xid /*, int type */) {
compensated_function pageid_t TpageAlloc(int xid) {
return TregionAlloc(xid, 1, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
}
@ -111,18 +107,16 @@ int op_fixed_page_alloc(const LogEntry* e, Page* p) {
/**
@return a recordid. The page field contains the page that was
@return a pageid_t. The page field contains the page that was
allocated, the slot field contains the number of slots on the
apge, and the size field contains the size of each slot.
*/
recordid TfixedPageAlloc(int xid, int size) {
int page = TpageAlloc(xid);
pageid_t TfixedPageAlloc(int xid, int size) {
pageid_t page = TpageAlloc(xid);
recordid rid = {page, stasis_fixed_records_per_page(size), size};
Tupdate(xid, page, &size, sizeof(int), OPERATION_FIXED_PAGE_ALLOC);
Tupdate(xid, rid, &size, sizeof(int), OPERATION_FIXED_PAGE_ALLOC);
return rid;
return page;
}
Operation getFixedPageAlloc() {
@ -134,12 +128,12 @@ Operation getFixedPageAlloc() {
return o;
}
compensated_function int TpageAllocMany(int xid, int count /*, int type*/) {
compensated_function pageid_t TpageAllocMany(int xid, int count) {
return TregionAlloc(xid, count, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
}
int TpageGetType(int xid, int pageid) {
Page * p = loadPage(xid, pageid);
int TpageGetType(int xid, pageid_t page) {
Page * p = loadPage(xid, page);
int ret = *stasis_page_type_ptr(p);
releasePage(p);
return ret;

View file

@ -5,18 +5,18 @@
#include <assert.h>
typedef struct regionAllocLogArg{
int startPage;
unsigned int pageCount;
pageid_t startPage;
pageid_t pageCount;
int allocationManager;
} regionAllocArg;
static pthread_mutex_t region_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_t holding_mutex;
static void TregionAllocHelper(int xid, unsigned int pageid, unsigned int pageCount, int allocationManager);
static void TallocBoundaryTag(int xid, unsigned int page, boundary_tag* tag);
static void TregionAllocHelper(int xid, pageid_t page, pageid_t pageCount, int allocationManager);
static void TallocBoundaryTag(int xid, pageid_t page, boundary_tag* tag);
static int readBoundaryTag(int xid, pageid_t page, boundary_tag* tag);
static void TsetBoundaryTag(int xid, unsigned int page, boundary_tag* tag);
static void TdeallocBoundaryTag(int xid, unsigned int page);
static void TsetBoundaryTag(int xid, pageid_t page, boundary_tag* tag);
static void TdeallocBoundaryTag(int xid, pageid_t page);
/** This doesn't need a latch since it is only initiated within nested
top actions (and is local to this file. During abort(), the nested
@ -47,7 +47,7 @@ static int op_alloc_region(const LogEntry *e, Page* p) {
static int operate_dealloc_region_unlocked(int xid, regionAllocArg *dat) {
unsigned int firstPage = dat->startPage + 1;
pageid_t firstPage = dat->startPage + 1;
boundary_tag t;
@ -77,11 +77,10 @@ static int op_dealloc_region(const LogEntry* e, Page* p) {
return ret;
}
static void TallocBoundaryTag(int xid, unsigned int page, boundary_tag* tag) {
static void TallocBoundaryTag(int xid, pageid_t page, boundary_tag* tag) {
//printf("Alloc boundary tag at %d = { %d, %d, %d }\n", page, tag->size, tag->prev_size, tag->status);
assert(holding_mutex == pthread_self());
recordid rid = {page, 0, 0};
Tupdate(xid, rid, tag, sizeof(boundary_tag), OPERATION_ALLOC_BOUNDARY_TAG);
Tupdate(xid, page, tag, sizeof(boundary_tag), OPERATION_ALLOC_BOUNDARY_TAG);
}
int readBoundaryTag(int xid, pageid_t page, boundary_tag* tag) {
@ -91,7 +90,7 @@ int readBoundaryTag(int xid, pageid_t page, boundary_tag* tag) {
return 0;
}
Tread(xid, rid, tag);
assert((page == 0 && tag->prev_size == UINT32_MAX) || (page != 0 && tag->prev_size != UINT32_MAX));
assert((page == 0 && tag->prev_size == PAGEID_T_MAX) || (page != 0 && tag->prev_size != PAGEID_T_MAX));
//printf("Read boundary tag at %d = { %d, %d, %d }\n", page, tag->size, tag->prev_size, tag->status);
return 1;
}
@ -105,11 +104,11 @@ int TregionReadBoundaryTag(int xid, pageid_t page, boundary_tag* tag) {
return ret;
}
static void TsetBoundaryTag(int xid, unsigned int page, boundary_tag* tag) {
static void TsetBoundaryTag(int xid, pageid_t page, boundary_tag* tag) {
//printf("Write boundary tag at %d = { %d, %d, %d }\n", page, tag->size, tag->prev_size, tag->status);
// Sanity checking:
assert((page == 0 && tag->prev_size == UINT32_MAX) || (page != 0 && tag->prev_size < UINT32_MAX/2));
assert((page == 0 && tag->prev_size == PAGEID_T_MAX) || (page != 0 && tag->prev_size < PAGEID_T_MAX/2));
assert(holding_mutex == pthread_self());
boundary_tag t2;
@ -122,7 +121,7 @@ static void TsetBoundaryTag(int xid, unsigned int page, boundary_tag* tag) {
Tset(xid, rid, tag);
}
static void TdeallocBoundaryTag(int xid, unsigned int page) {
static void TdeallocBoundaryTag(int xid, pageid_t page) {
boundary_tag t;
assert(holding_mutex == pthread_self());
@ -141,8 +140,8 @@ void regionsInit() {
holding_mutex = pthread_self();
if(pageType != BOUNDARY_TAG_PAGE) {
boundary_tag t;
t.size = UINT32_MAX;
t.prev_size = UINT32_MAX;
t.size = PAGEID_T_MAX;
t.prev_size = PAGEID_T_MAX;
t.status = REGION_VACANT;
t.region_xid = INVALID_XID;
t.allocation_manager = 0;
@ -176,7 +175,7 @@ int TregionNextBoundaryTag(int xid, pageid_t* pid, boundary_tag * tag, int type)
int ret = readBoundaryTag(xid, *pid-1, tag);
if(ret) {
while(1) {
if(tag->size == UINT32_MAX) {
if(tag->size == PAGEID_T_MAX) {
ret = 0;
break;
}
@ -204,22 +203,22 @@ void fsckRegions(int xid) {
int pageType;
boundary_tag tag;
boundary_tag prev_tag;
prev_tag.size = UINT32_MAX;
int tagPage = 0;
prev_tag.size = PAGEID_T_MAX;
pageid_t tagPage = 0;
pageType = TpageGetType(xid, tagPage);
assert(pageType == BOUNDARY_TAG_PAGE);
int ret =readBoundaryTag(xid, tagPage, &tag);
assert(ret);
assert(tag.prev_size == UINT32_MAX);
assert(tag.prev_size == PAGEID_T_MAX);
while(tag.size != UINT32_MAX) {
while(tag.size != PAGEID_T_MAX) {
// Ignore region_xid, allocation_manager for now.
assert(tag.status == REGION_VACANT || tag.status == REGION_ZONED);
assert(prev_tag.size == tag.prev_size);
for(int i = 0; i < tag.size; i++) {
int thisPage = tagPage + 1 + i;
for(pageid_t i = 0; i < tag.size; i++) {
pageid_t thisPage = tagPage + 1 + i;
pageType = TpageGetType(xid, thisPage);
if(pageType == BOUNDARY_TAG_PAGE) {
@ -246,10 +245,10 @@ void fsckRegions(int xid) {
}
static void TregionAllocHelper(int xid, unsigned int pageid, unsigned int pageCount, int allocationManager) {
static void TregionAllocHelper(int xid, pageid_t page, pageid_t pageCount, int allocationManager) {
boundary_tag t;
int ret = readBoundaryTag(xid, pageid, &t);
int ret = readBoundaryTag(xid, page, &t);
assert(ret);
if(t.size != pageCount) {
@ -258,22 +257,22 @@ static void TregionAllocHelper(int xid, unsigned int pageid, unsigned int pageCo
assert(t.size > pageCount);
unsigned int newPageid = pageid + pageCount + 1;
pageid_t newPageid = page + pageCount + 1;
boundary_tag new_tag;
if(t.size != UINT32_MAX) {
if(t.size != PAGEID_T_MAX) {
new_tag.size = t.size - pageCount - 1; // pageCount must be strictly less than t->size, so this is non-negative.
boundary_tag succ_tag;
int ret = readBoundaryTag(xid, pageid + t.size + 1, &succ_tag);
int ret = readBoundaryTag(xid, page + t.size + 1, &succ_tag);
assert(ret);
succ_tag.prev_size = new_tag.size;
TsetBoundaryTag(xid, pageid + t.size + 1, &succ_tag);
TsetBoundaryTag(xid, page + t.size + 1, &succ_tag);
} else {
new_tag.size = UINT32_MAX;
new_tag.size = PAGEID_T_MAX;
}
new_tag.prev_size = pageCount;
@ -294,11 +293,11 @@ static void TregionAllocHelper(int xid, unsigned int pageid, unsigned int pageCo
t.allocation_manager = allocationManager;
t.size = pageCount;
TsetBoundaryTag(xid, pageid, &t);
TsetBoundaryTag(xid, page, &t);
}
static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag *t) {
static void consolidateRegions(int xid, pageid_t * firstPage, boundary_tag *t) {
if(t->status != REGION_VACANT || TisActiveTransaction(t->region_xid)) { return; }
@ -307,16 +306,16 @@ static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag
int mustWriteOriginalTag = 0;
// If successor is vacant, merge.
if(t->size != UINT32_MAX) { // is there a successor?
unsigned int succ_page = (*firstPage) + 1 + t->size;
if(t->size != PAGEID_T_MAX) { // is there a successor?
pageid_t succ_page = (*firstPage) + 1 + t->size;
boundary_tag succ_tag;
int ret = readBoundaryTag(xid, succ_page, &succ_tag);
assert(ret);
// TODO: Check stasis_page_type_ptr()...
if(succ_tag.size == UINT32_MAX) {
t->size = UINT32_MAX;
if(succ_tag.size == PAGEID_T_MAX) {
t->size = PAGEID_T_MAX;
assert(succ_tag.status == REGION_VACANT);
// TODO: Truncate page file.
TdeallocBoundaryTag(xid, succ_page);
@ -324,7 +323,7 @@ static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag
} else if(succ_tag.status == REGION_VACANT && (!TisActiveTransaction(succ_tag.region_xid))) {
t->size = t->size + succ_tag.size + 1;
unsigned int succ_succ_page = succ_page + succ_tag.size + 1;
pageid_t succ_succ_page = succ_page + succ_tag.size + 1;
boundary_tag succ_succ_tag;
@ -346,9 +345,9 @@ static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag
// creates a situation where the current page is not a boundary
// tag...)
if(t->prev_size != UINT32_MAX) {
if(t->prev_size != PAGEID_T_MAX) {
unsigned int pred_page = ((*firstPage) - 1) - t->prev_size; // If the predecessor is length zero, then it's boundary tag is two pages before this region's tag.
pageid_t pred_page = ((*firstPage) - 1) - t->prev_size; // If the predecessor is length zero, then it's boundary tag is two pages before this region's tag.
boundary_tag pred_tag;
int ret = readBoundaryTag(xid, pred_page, &pred_tag);
@ -358,8 +357,8 @@ static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag
TdeallocBoundaryTag(xid, *firstPage);
if(t->size == UINT32_MAX) {
pred_tag.size = UINT32_MAX;
if(t->size == PAGEID_T_MAX) {
pred_tag.size = PAGEID_T_MAX;
// TODO: truncate region
@ -367,7 +366,7 @@ static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag
pred_tag.size += (t->size + 1);
unsigned int succ_page = (*firstPage) + 1+ t->size;
pageid_t succ_page = (*firstPage) + 1+ t->size;
assert(pred_page + pred_tag.size + 1 == succ_page);
boundary_tag succ_tag;
@ -397,7 +396,7 @@ static void consolidateRegions(int xid, unsigned int * firstPage, boundary_tag
}
void TregionDealloc(int xid, unsigned int firstPage) {
void TregionDealloc(int xid, pageid_t firstPage) {
// Note that firstPage is the first *caller visible* page in the
// region. The boundary tag is stored on firstPage - 1. Also, note
@ -429,7 +428,7 @@ void TregionDealloc(int xid, unsigned int firstPage) {
pthread_mutex_unlock(&region_mutex);
}
unsigned int TregionAlloc(int xid, unsigned int pageCount, int allocationManager) {
pageid_t TregionAlloc(int xid, pageid_t pageCount, int allocationManager) {
// Initial implementation. Naive first fit.
pthread_mutex_lock(&region_mutex);
@ -438,7 +437,7 @@ unsigned int TregionAlloc(int xid, unsigned int pageCount, int allocationManager
void * ntaHandle = TbeginNestedTopAction(xid, OPERATION_NOOP, 0, 0);
unsigned int pageid = 0;
pageid_t pageid = 0;
boundary_tag t;
int ret = readBoundaryTag(xid, pageid, &t); // XXX need to check if there is a boundary tag there or not!
@ -526,27 +525,27 @@ Operation getDeallocRegionInverse() {
return o;
}
void TregionFindNthActive(int xid, unsigned int regionNumber, unsigned int * firstPage, unsigned int * size) {
void TregionFindNthActive(int xid, pageid_t regionNumber, pageid_t * firstPage, pageid_t * size) {
boundary_tag t;
recordid rid = {0, 0, sizeof(boundary_tag)};
pthread_mutex_lock(&region_mutex);
holding_mutex = pthread_self();
Tread(xid, rid, &t);
unsigned int prevSize = 0;
pageid_t prevSize = 0;
while(t.status == REGION_VACANT) {
rid.page += (t.size + 1);
Tread(xid, rid, &t);
assert(t.size != UINT_MAX);
assert(t.prev_size != UINT_MAX);
assert(t.size != PAGEID_T_MAX);
assert(t.prev_size != PAGEID_T_MAX);
assert(prevSize == t.prev_size || !prevSize);
prevSize = t.size;
}
for(int i = 0; i < regionNumber; i++) {
for(pageid_t i = 0; i < regionNumber; i++) {
rid.page += (t.size + 1);
Tread(xid, rid, &t);
if(t.status == REGION_VACANT) { i--; }
assert(t.size != UINT_MAX);
assert(t.prev_size != UINT_MAX || i == 0);
assert(t.size != PAGEID_T_MAX);
assert(t.prev_size != PAGEID_T_MAX || i == 0);
assert(prevSize == t.prev_size || !prevSize);
prevSize = t.size;
}

View file

@ -112,7 +112,7 @@ int Tset(int xid, recordid rid, const void * dat) {
b += rid.size;
Tread(xid, rid, b);
Tupdate(xid,rid,buf,sz,OPERATION_SET);
Tupdate(xid,rid.page,buf,sz,OPERATION_SET);
free(buf);
}
return 0;
@ -129,9 +129,7 @@ int TsetRaw(int xid, recordid rid, const void * dat) {
memcpy(b, dat, rid.size);
b += rid.size;
TreadRaw(xid, rid, b);
// XXX get rid of recordid dereference assert in Tupdate, then change this
// to call Tupdate
TupdateRaw(xid,rid,buf,sz,OPERATION_SET);
Tupdate(xid,rid.page,buf,sz,OPERATION_SET);
free(buf);
return 0;
}
@ -206,7 +204,7 @@ compensated_function void TsetRange(int xid, recordid rid, int offset, int lengt
unlock(p->rwlatch);
Tupdate(xid, rid, range, sizeof(set_range_t) + 2 * length, OPERATION_SET_RANGE);
Tupdate(xid, rid.page, range, sizeof(set_range_t) + 2 * length, OPERATION_SET_RANGE);
free(range);
releasePage(p);

View file

@ -35,7 +35,7 @@ compensated_function recordid dereferenceIndirectRID(int xid, recordid rid) {
offset += *maxslot_ptr(page, i - 1);
} /** else, the adjustment to the offset is zero */
int nextPage = *page_ptr(page, i);
pageid_t nextPage = *page_ptr(page, i);
unlock(page->rwlatch);
releasePage(page);
@ -69,7 +69,7 @@ unsigned int calculate_level (unsigned int number_of_pages) {
return level;
}
compensated_function recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount);
compensated_function recordid __rallocMany(int xid, pageid_t parentPage, int recordSize, int recordCount);
/**
@todo is there a way to implement rallocMany so that it doesn't
have to physically log pre- and post-images of the allocated space?
@ -87,7 +87,7 @@ compensated_function recordid rallocMany(int xid, int recordSize, int recordCoun
return ret;
}
compensated_function recordid __rallocMany(int xid, int parentPage, int recordSize, int recordCount) {
compensated_function recordid __rallocMany(int xid, pageid_t parentPage, int recordSize, int recordCount) {
/* How many levels of pages do we need? */
@ -121,21 +121,21 @@ compensated_function recordid __rallocMany(int xid, int parentPage, int recordSi
/* OK, now allocate the pages. */
int next_level_records_per_page = records_per_page;
pageid_t next_level_records_per_page = records_per_page;
for(int i = 0; i < (level - 1); i++) {
next_level_records_per_page *= INDIRECT_POINTERS_PER_PAGE;
}
int newPageCount = (int)ceil((double)recordCount / (double)next_level_records_per_page);
int firstChildPage;
pageid_t newPageCount = (int)ceil((double)recordCount / (double)next_level_records_per_page);
pageid_t firstChildPage;
try_ret(NULLRID) {
firstChildPage = TpageAllocMany(xid, newPageCount/*, SLOTTED_PAGE*/);/*pageAllocMultiple(newPageCount); */
firstChildPage = TpageAllocMany(xid, newPageCount);
} end_ret(NULLRID);
int tmpRecordCount = recordCount;
int thisChildPage = firstChildPage;
pageid_t tmpRecordCount = recordCount;
pageid_t thisChildPage = firstChildPage;
while(tmpRecordCount > 0) {
try_ret(NULLRID) {
@ -152,7 +152,7 @@ compensated_function recordid __rallocMany(int xid, int parentPage, int recordSi
indirectInitialize(&p, level);
int i = 0;
pageid_t i = 0;
for(tmpRecordCount = recordCount; tmpRecordCount > 0; tmpRecordCount -= next_level_records_per_page) {
@ -175,7 +175,7 @@ compensated_function recordid __rallocMany(int xid, int parentPage, int recordSi
writelock(p.rwlatch,0);
stasis_slotted_initialize_page(&p);
p.id = parentPage;
for(int i = 0; i < recordCount; i++) {
for(pageid_t i = 0; i < recordCount; i++) {
/* Normally, we would worry that the page id isn't set, but
we're discarding the recordid returned by page ralloc
anyway. */

View file

@ -261,14 +261,14 @@ int Tbegin() {
return XactionTable[index].xid;
}
static compensated_function void TactionHelper(int xid, recordid rid,
static compensated_function void TactionHelper(int xid,
const void * dat, size_t datlen, int op,
Page * p) {
LogEntry * e;
assert(xid >= 0);
try {
if(globalLockManager.writeLockPage) {
globalLockManager.writeLockPage(xid, rid.page);
globalLockManager.writeLockPage(xid, p->id);
}
} end;
@ -284,28 +284,23 @@ static compensated_function void TactionHelper(int xid, recordid rid,
}
// XXX remove this function once it's clear that nobody is failing the assert in Tupdate()
compensated_function void TupdateRaw(int xid, recordid rid, const void * dat, size_t datlen,
/*compensated_function void TupdateRaw(int xid, pageid_t page, const void * dat, size_t datlen,
int op) {
assert(xid >= 0);
Page * p = loadPage(xid, rid.page);
Page * p = loadPage(xid, page);
TactionHelper(xid, rid, dat, datlen, op, p);
releasePage(p);
}
}*/
compensated_function void TupdateStr(int xid, recordid rid,
compensated_function void TupdateStr(int xid, pageid_t page,
const char *dat, size_t datlen, int op) {
Tupdate(xid, rid, dat, datlen, op);
Tupdate(xid, page, dat, datlen, op);
}
compensated_function void Tupdate(int xid, recordid rid,
compensated_function void Tupdate(int xid, pageid_t page,
const void *dat, size_t datlen, int op) {
Page * p = loadPage(xid, rid.page);
readlock(p->rwlatch,0);
recordid rid2 = stasis_record_dereference(xid, p, rid);
assert(rid2.page == rid.page);
unlock(p->rwlatch);
TactionHelper(xid, rid, dat, datlen, op, p);
Page * p = loadPage(xid, page);
TactionHelper(xid, dat, datlen, op, p);
releasePage(p);
}

View file

@ -29,11 +29,11 @@ void dirtyPages_add(Page * p) {
if(!p->dirty) {
p->dirty = 1;
//assert(p->LSN);
void* ret = pblHtLookup(dirtyPages, &(p->id), sizeof(int));
void* ret = pblHtLookup(dirtyPages, &(p->id), sizeof(p->id));
assert(!ret);
lsn_t * insert = malloc(sizeof(lsn_t));
*insert = p->LSN;
pblHtInsert(dirtyPages, &(p->id), sizeof(int), insert); //(void*)p->LSN);
pblHtInsert(dirtyPages, &(p->id), sizeof(p->id), insert); //(void*)p->LSN);
}
pthread_mutex_unlock(&dirtyPages_mutex);
}
@ -44,8 +44,8 @@ void dirtyPages_remove(Page * p) {
//assert(pblHtLookup(dirtyPages, &(p->id), sizeof(int)));
// printf("With lsn = %d\n", (lsn_t)pblHtCurrent(dirtyPages));
p->dirty = 0;
lsn_t * old = pblHtLookup(dirtyPages, &(p->id),sizeof(int));
pblHtRemove(dirtyPages, &(p->id), sizeof(int));
lsn_t * old = pblHtLookup(dirtyPages, &(p->id),sizeof(p->id));
pblHtRemove(dirtyPages, &(p->id), sizeof(p->id));
if(old) {
free(old);
}
@ -65,10 +65,10 @@ int dirtyPages_isDirty(Page * p) {
static lsn_t dirtyPages_minRecLSN() {
lsn_t lsn = LSN_T_MAX; // LogFlushedLSN ();
int* pageid;
pageid_t* pageid;
pthread_mutex_lock(&dirtyPages_mutex);
for( pageid = (int*)pblHtFirst (dirtyPages); pageid; pageid = (int*)pblHtNext(dirtyPages)) {
for( pageid = (pageid_t*)pblHtFirst (dirtyPages); pageid; pageid = (pageid_t*)pblHtNext(dirtyPages)) {
lsn_t * thisLSN = (lsn_t*) pblHtCurrent(dirtyPages);
// printf("lsn = %d\n", thisLSN);
if(*thisLSN < lsn) {
@ -81,8 +81,7 @@ static lsn_t dirtyPages_minRecLSN() {
}
static void dirtyPages_flush() {
// XXX Why was this MAX_BUFFER_SIZE+1?!?
int * staleDirtyPages = malloc(sizeof(int) * (MAX_BUFFER_SIZE));
pageid_t * staleDirtyPages = malloc(sizeof(pageid_t) * (MAX_BUFFER_SIZE));
int i;
for(i = 0; i < MAX_BUFFER_SIZE; i++) {
staleDirtyPages[i] = -1;
@ -93,7 +92,7 @@ static void dirtyPages_flush() {
i = 0;
for(tmp = pblHtFirst(dirtyPages); tmp; tmp = pblHtNext(dirtyPages)) {
staleDirtyPages[i] = *((int*) pblHtCurrentKey(dirtyPages));
staleDirtyPages[i] = *((pageid_t*) pblHtCurrentKey(dirtyPages));
i++;
}
assert(i < MAX_BUFFER_SIZE);
@ -107,7 +106,7 @@ static void dirtyPages_flush() {
free(staleDirtyPages);
}
void dirtyPages_flushRange(pageid_t start, pageid_t stop) {
int * staleDirtyPages = malloc(sizeof(int) * (MAX_BUFFER_SIZE));
pageid_t * staleDirtyPages = malloc(sizeof(pageid_t) * (MAX_BUFFER_SIZE));
int i;
Page * p = 0;
@ -116,7 +115,7 @@ void dirtyPages_flushRange(pageid_t start, pageid_t stop) {
void *tmp;
i = 0;
for(tmp = pblHtFirst(dirtyPages); tmp; tmp = pblHtNext(dirtyPages)) {
int num = *((int*) pblHtCurrentKey(dirtyPages));
pageid_t num = *((pageid_t*) pblHtCurrentKey(dirtyPages));
if(num <= start && num < stop) {
staleDirtyPages[i] = num;
i++;

View file

@ -1,12 +1,14 @@
#ifndef ALLOCATION_POLICY_H
#define ALLOCATION_POLICY_H
#include <stasis/common.h>
struct allocationPolicy;
typedef struct allocationPolicy allocationPolicy;
typedef struct availablePage {
int freespace;
int pageid;
pageid_t pageid;
int lockCount; // Number of active transactions that have alloced or dealloced from this page.
} availablePage;
@ -17,6 +19,6 @@ availablePage * allocationPolicyFindPage(allocationPolicy * ap, int xid, int fre
void allocationPolicyTransactionCompleted(allocationPolicy * ap, int xid);
void allocationPolicyUpdateFreespaceUnlockedPage(allocationPolicy * ap, availablePage * key, int newFree);
void allocationPolicyUpdateFreespaceLockedPage(allocationPolicy * ap, int xid, availablePage * key, int newFree);
void allocationPolicyLockPage(allocationPolicy * ap, int xid, int pageid);
void allocationPolicyAllocedFromPage(allocationPolicy * ap, int xid, int pageid);
void allocationPolicyLockPage(allocationPolicy * ap, int xid, pageid_t page);
void allocationPolicyAllocedFromPage(allocationPolicy * ap, int xid, pageid_t page);
#endif // ALLOCATION_POLICY_H

View file

@ -106,16 +106,16 @@ typedef struct Page_s Page;
*
* @return fully formed Page type
*/
Page * loadPage(int xid, int pageid);
Page * loadPage(int xid, pageid_t pageid);
Page * loadUninitializedPage(int xid, int pageid);
Page * loadUninitializedPage(int xid, pageid_t pageid);
/**
This is the function pointer that bufInit sets in order to
override loadPage.
*/
extern Page * (*loadPageImpl)(int xid, int pageid);
extern Page * (*loadUninitPageImpl)(int xid, int pageid);
extern Page * (*loadPageImpl)(int xid, pageid_t pageid);
extern Page * (*loadUninitPageImpl)(int xid, pageid_t pageid);
/**
loadPage aquires a lock when it is called, effectively pinning it
in memory. releasePage releases this lock.
@ -171,7 +171,7 @@ extern void (*bufDeinit)();
#define loadPage(x,y) __profile_loadPage((x), (y), __FILE__, __LINE__)
#define releasePage(x) __profile_releasePage((x))
compensated_function void __profile_releasePage(Page * p);
compensated_function Page * __profile_loadPage(int xid, int pageid, char * file, int line);
compensated_function Page * __profile_loadPage(int xid, pageid_t pageid, char * file, int line);
#endif

View file

@ -59,11 +59,4 @@ Page* pageMalloc();
@see pageMalloc()
*/
void pageFree(Page * p, int id);
/**
obtains the type of the record pointed to by rid.
@return UNINITIALIZED_RECORD, BLOB_RECORD, SLOTTED_RECORD, FIXED_RECORD or an error code.
*/
void pageFree(Page * p, pageid_t id);

View file

@ -1,5 +1,6 @@
#include <stasis/crc32.h>
#include <stasis/common.h>
#ifndef __HASH_H
#define __HASH_H
@ -7,16 +8,17 @@
@todo write a test harness for this...
*/
unsigned int max_bucket(unsigned char tableBits, unsigned int nextExtension);
uint64_t max_bucket(unsigned char tableBits, uint64_t nextExtension);
/**
This function maps from the length of the bucket list to a appropriate set
of linear hash parameters to fill that size.
*/
void hashGetParamsForSize(unsigned int desiredSize, unsigned char *tableBits,
unsigned int* nextExtension);
unsigned int hash(const void * val, long val_length, unsigned char tableBits, unsigned int nextExtension);
void hashGetParamsForSize(uint64_t desiredSize, unsigned char *tableBits,
uint64_t* nextExtension);
/**
XXX despite its interface, hash can't return values > 2^32!
*/
uint64_t hash(const void * val, uint64_t val_length, unsigned char tableBits, uint64_t nextExtension);
#define twoToThe(x) (1 << (x))
/** @todo logBase2 should be able to handle 64 bit values, but
currently doesn't...*/
unsigned int logBase2(unsigned int value);
uint32_t logBase2(uint64_t value);
#endif /*__HASH_H */

View file

@ -2,9 +2,9 @@
typedef struct {
void (*init)();
int (*readLockPage) (int xid, int page);
int (*writeLockPage) (int xid, int page);
int (*unlockPage) (int xid, int page);
int (*readLockPage) (int xid, pageid_t page);
int (*writeLockPage) (int xid, pageid_t page);
int (*unlockPage) (int xid, pageid_t page);
int (*readLockRecord) (int xid, recordid rid);
int (*writeLockRecord)(int xid, recordid rid);
int (*unlockRecord) (int xid, recordid rid);

View file

@ -34,7 +34,7 @@ void TallocDeinit();
*/
compensated_function recordid Talloc(int xid, unsigned long size);
compensated_function recordid TallocFromPage(int xid, long page, unsigned long size);
compensated_function recordid TallocFromPage(int xid, pageid_t page, unsigned long size);
/**
Free a record.
@ -69,9 +69,9 @@ compensated_function int TrecordType(int xid, recordid rid);
compensated_function int TrecordSize(int xid, recordid rid);
/** Return the number of records stored in page pageid */
compensated_function int TrecordsInPage(int xid, int pageid);
compensated_function int TrecordsInPage(int xid, pageid_t page);
compensated_function void TinitializeSlottedPage(int xid, int pageid);
compensated_function void TinitializeFixedPage(int xid, int pageid, int slotLength);
compensated_function void TinitializeSlottedPage(int xid, pageid_t page);
compensated_function void TinitializeFixedPage(int xid, pageid_t page, int slotLength);
#endif

View file

@ -92,7 +92,7 @@ terms specified in this license.
*/
compensated_function recordid TarrayListAlloc(int xid, int numPages, int multiplier, int recordSize);
compensated_function recordid TarrayListAlloc(int xid, pageid_t numPages, int multiplier, int recordSize);
/**
Extend the ArrayList in place.

View file

@ -58,7 +58,7 @@ terms specified in this license.
#include <stasis/constants.h>
static inline void Tdecrement(int xid, recordid rid) {
Tupdate(xid,rid,&rid.slot,sizeof(rid.slot),OPERATION_DECREMENT);
Tupdate(xid,rid.page,&rid.slot,sizeof(rid.slot),OPERATION_DECREMENT);
}
Operation getDecrement();

View file

@ -64,7 +64,7 @@ terms specified in this license.
#include <stasis/constants.h>
static inline void Tincrement(int xid, recordid rid) {
Tupdate(xid,rid,&rid.slot,sizeof(rid.slot),OPERATION_INCREMENT);
Tupdate(xid,rid.page,&rid.slot,sizeof(rid.slot),OPERATION_INCREMENT);
}
Operation getIncrement();

View file

@ -20,8 +20,6 @@ recordid ThashAlloc(int xid, int keySize, int valSize) ;
void TnaiveHashInsert(int xid, recordid hashRid,
void * key, int keySize,
void * val, int valSize);
/*void ThashDelete(int xid, recordid hashRid,
void * key, int keySize);*/
int TnaiveHashDelete(int xid, recordid hashRid,
void * key, int keySize, int valSize);
void TnaiveHashUpdate(int xid, recordid hashRid, void * key, int keySize, void * val, int valSize);
@ -30,7 +28,7 @@ void ThashInit();
void ThashDeinit();
int ThashOpen(int xid, recordid hashRid, int keySize, int valSize);
int ThashClose(int xid, recordid hashRid) ;
void lockBucket(int bucket);
void unlockBucket(int bucket);
void lockBucket(pageid_t bucket);
void unlockBucket(pageid_t bucket);
int lockBucketForKey(const byte * key, int keySize, recordid * headerRidB);
#endif

View file

@ -60,14 +60,14 @@ terms specified in this license.
#ifndef __PAGE_OPERATIONS_H__
#define __PAGE_OPERATIONS_H__
compensated_function int TpageAlloc(int xid/*, int type*/);
compensated_function recordid TfixedPageAlloc(int xid, int size);
compensated_function int TpageAllocMany(int xid, int count/*, int type*/);
compensated_function int TpageDealloc(int xid, int pageid);
compensated_function int TpageSet(int xid, int pageid, const void* dat);
compensated_function int TpageSetRange(int xid, int pageid, int offset, const void* dat, int len);
compensated_function int TpageGet(int xid, int pageid, void* buf);
int TpageGetType(int xid, int pageid);
compensated_function pageid_t TpageAlloc(int xid);
compensated_function pageid_t TfixedPageAlloc(int xid, int size);
compensated_function pageid_t TpageAllocMany(int xid, int count);
compensated_function int TpageDealloc(int xid, pageid_t page);
compensated_function int TpageSet(int xid, pageid_t page, const void* dat);
compensated_function int TpageSetRange(int xid, pageid_t page, int offset, const void* dat, int len);
compensated_function int TpageGet(int xid, pageid_t page, void* buf);
int TpageGetType(int xid, pageid_t page);
Operation getPageSetRange();
Operation getPageSetRangeInverse();

View file

@ -10,8 +10,8 @@
*/
typedef struct boundary_tag {
unsigned int size;
unsigned int prev_size;
pageid_t size;
pageid_t prev_size;
int status;
int region_xid;
int allocation_manager;
@ -25,12 +25,12 @@ typedef struct boundary_tag {
void regionsInit();
unsigned int TregionAlloc(int xid, unsigned int pageCount, int allocaionManager);
void TregionDealloc(int xid, unsigned int firstPage);
unsigned int TregionSize(int xid, unsigned int firstPage);
pageid_t TregionAlloc(int xid, pageid_t pageCount, int allocaionManager);
void TregionDealloc(int xid, pageid_t firstPage);
unsigned int TregionSize(int xid, pageid_t firstPage);
/** Currently, this function is O(n) in the number of regions, so be careful! */
void TregionFindNthActive(int xid, unsigned int n, unsigned int * firstPage, unsigned int * size);
void TregionFindNthActive(int xid, pageid_t n, pageid_t * firstPage, pageid_t * size);
int TregionNextBoundaryTag(int xid, pageid_t*pid, boundary_tag *tag, int allocationManager);
int TregionReadBoundaryTag(int xid, pageid_t pid, boundary_tag *tag);

View file

@ -375,6 +375,15 @@ static inline int32_t*
stasis_page_int32_ptr_from_end(Page *p, int count) {
return ((int32_t*)stasis_page_type_ptr(p))-count;
}
/** Return a pointer to the count'th pageid_t slot, counting forward
    from the start of page p's in-memory image. */
static inline pageid_t*
stasis_page_pageid_t_ptr_from_start(Page *p, int count) {
  pageid_t *base = (pageid_t*)(p->memAddr);
  return &base[count];
}
/** Return a pointer count pageid_t slots before the page-type field
    at the end of page p. */
static inline pageid_t*
stasis_page_pageid_t_ptr_from_end(Page *p, int count) {
  pageid_t *end = (pageid_t*)stasis_page_type_ptr(p);
  return end - count;
}
// Const methods
static inline const byte*
stasis_page_byte_cptr_from_start(const Page *p, int count) {
@ -403,6 +412,15 @@ static inline const int32_t*
stasis_page_int32_cptr_from_end(const Page *p, int count) {
return (const int32_t*)stasis_page_int32_ptr_from_end((Page*)p,count);
}
static inline const pageid_t*
stasis_page_pageid_t_cptr_from_start(const Page *p, int count) {
return ((const pageid_t*)(p->memAddr))+count;
}
static inline const pageid_t*
stasis_page_pageid_t_cptr_from_end(const Page *p, int count) {
return (const pageid_t*)stasis_page_pageid_t_cptr_from_end(p,count);
}
/*@}*/

View file

@ -24,7 +24,7 @@ below this block. level = 1 means that the pageid's point to 'normal'
pages. (They may be slotted (type = 1), or provided by some other
implementation).
@todo Does anything actually use indirect.h? Why doesn't arrayList use it?
@todo Does anything actually use indirect.h? ArrayList doesn't use it because accessing it is O(log n).
*/
@ -39,8 +39,8 @@ BEGIN_C_DECLS
#define level_ptr(page) stasis_page_int16_ptr_from_end((page), 3)
/** @todo indirect.h cannot handle 64 bit file offsets! */
#define page_ptr(page, offset) stasis_page_int32_ptr_from_start((page), 2*(offset))
#define maxslot_ptr(page, offset) stasis_page_int32_ptr_from_start((page), 2*(offset)+1)
#define page_ptr(page, offset) stasis_page_pageid_t_ptr_from_start((page), 2*(offset))
#define maxslot_ptr(page, offset) stasis_page_pageid_t_ptr_from_start((page), 2*(offset)+1)
#define INDIRECT_POINTERS_PER_PAGE (USABLE_SIZE_OF_PAGE / 16)
@ -51,7 +51,7 @@ BEGIN_C_DECLS
compensated_function recordid dereferenceIndirectRID(int xid, recordid rid);
void indirectInitialize(Page * p, int height);
compensated_function recordid rallocMany(/*int parentPage, lsn_t lsn,*/int xid, int recordSize, int recordCount);
compensated_function recordid rallocMany(int xid, int recordSize, int recordCount);
compensated_function int indirectPageRecordCount(int xid, recordid rid);
page_impl indirectImpl();

View file

@ -610,23 +610,13 @@ int Tbegin();
*
* @see operations.h set.h
*/
compensated_function void Tupdate(int xid, recordid rid,
compensated_function void Tupdate(int xid, pageid_t p,
const void *dat, size_t datlen, int op);
/**
@deprecated Only exists to work around swig/python limitations.
*/
compensated_function void TupdateStr(int xid, recordid rid,
compensated_function void TupdateStr(int xid, pageid_t page,
const char *dat, size_t datlen, int op);
/**
Like Tupdate(), but does not call stasis_record_dereference().
Tupdate() no longer calls stasis_record_dereference(), but has some
sanity checks that will make TupdateRaw() necessary until I'm sure
that no remaining code relies on the old behavior.
@deprecated If you need to call this function, be prepared to change your code.
*/
compensated_function void TupdateRaw(int xid, recordid rid,
const void *dat, size_t datlen, int op);
/**
* Read the value of a record.
*

View file

@ -75,10 +75,8 @@ START_TEST(bTreeTest)
index to tell how many entries are currently valid.
For now it will just return false if you try to add something to it
when it is already full.
This method doesn't use the Slot value of rid!
We make a copy of rid_caller so that the caller's copy doesn't change.
*/
int insert(int xid, Page* p, recordid rid_caller, int valueIn){
int insert(int xid, Page* p, int valueIn){
printf ("\nbegin insert\n");
int DEBUG = 0;
int DEBUGERROR = 1;
@ -89,7 +87,7 @@ int insert(int xid, Page* p, recordid rid_caller, int valueIn){
//printf("\npage->id = %d\n", p->id);
// make a copy of the rid - so we don't effect the caller's copy
recordid rid = rid_caller;
recordid rid = { p->id, 0, sizeof(int)};
// if DEBUGERROR ==1 this causes a seg fault below!
@ -179,10 +177,10 @@ int insert(int xid, Page* p, recordid rid_caller, int valueIn){
it to be a BTreeNode. Just puts the value 0 in the
first index of the page.
*/
void initializeNewBTreeNode(int xid, Page* p, recordid rid){
void initializeNewBTreeNode(int xid, Page* p){
// need access to the first slot
rid.slot = 0;
recordid rid = { p->id, 0, sizeof(int)};
// prepare the value to go into the first slot
int countInt = 0;
@ -197,14 +195,13 @@ void testFunctions(){
// getting things ready
int xid = Tbegin();
recordid rid1 = TfixedPageAlloc(xid, sizeof(int)); // this does the initialize
int pageid1 = rid1.page;
pageid_t pageid1 = TfixedPageAlloc(xid, sizeof(int)); // this does the initialize
Page * p1 = loadPage(xid, pageid1);
// calling functions
writelock(p1->rwlatch,0);
initializeNewBTreeNode(xid, p1, rid1);
insert(xid, p1, rid1, 3);
initializeNewBTreeNode(xid, p1);
insert(xid, p1, 3);
unlock(p1->rwlatch);
// cleaning up
@ -230,10 +227,7 @@ int SimpleExample(){
recordid rid1 = TfixedPageAlloc(xid, sizeof(int)); // this does the initialize
int pageid1 = rid1.page;
pageid_t pageid1 = TfixedPageAlloc(xid, sizeof(int)); // this does the initialize
Page * p1 = loadPage(xid, pageid1);
writelock(p1->rwlatch, 0);
@ -241,21 +235,19 @@ int SimpleExample(){
/* check consistency between rid & page's values
* for number of slots and record size */
// assert (rid1.slot == fixedPageCount(p1));
// assert (rid1.size == fixedPageRecordSize(p1));
assert (p1->id == rid1.page);
assert (p1->id == pageid1);
/* check to make sure page is recorded as a FIXED_PAGE */
assert( *stasis_page_type_ptr(p1) == FIXED_PAGE);
if (DEBUGP) { printf("\n%lld\n", (long long)rid1.page); }
if (DEBUGP) { printf("\n%lld\n", (long long)pageid1); }
byte * b1 = (byte *) malloc (sizeof (int));
byte * b2 = (byte *) malloc (sizeof (int));
byte * b3 = (byte *) malloc (sizeof (int));
// int x = *recordcount_ptr(p1);
int x = 42; // rcs - recordcount_ptr is no longer exposed here...
int y = rid1.slot;
int y = 0; //rid1.slot;
int z = 256;
b1 = (byte *) & x;
@ -273,13 +265,12 @@ int SimpleExample(){
if (DEBUGP) { printf("\nz = %d\n", z);}
if (DEBUGP) { printf("\nb3 = %d\n", *b3);}
recordid rid2 = rid1;
rid2.slot = 0;
recordid rid1 = { pageid1, 0,sizeof(int)};
// @todo This is a messy way to do this...
stasis_record_write(xid, p1, 1, rid2, b1);
stasis_record_read(xid, p1, rid2, b2);
stasis_record_write(xid, p1, 1, rid1, b1);
stasis_record_read(xid, p1, rid1, b2);
if (DEBUGP) { printf("\nb2** = %d\n",*((int *) b2));}
// initializeNewBTreeNode(p1, rid1);

View file

@ -48,12 +48,12 @@ terms specified in this license.
#include "../check_includes.h"
#define LOG_NAME "check_regions.log"
long myrandom(long x) {
pageid_t myrandom(pageid_t x) {
double xx = x;
double r = random();
double max = ((uint64_t)RAND_MAX)+1;
max /= xx;
return (long)((r/max));
return (pageid_t)((r/max));
}
@ -64,9 +64,9 @@ START_TEST(regions_smokeTest) {
Tinit();
int xid = Tbegin();
int max_page = 0;
int page = TregionAlloc(xid, 100, 0);
int new_page = page;
pageid_t max_page = 0;
pageid_t page = TregionAlloc(xid, 100, 0);
pageid_t new_page = page;
if(new_page + 1 + 100 > max_page) {
max_page = new_page + 1 + 100;
}
@ -77,7 +77,7 @@ START_TEST(regions_smokeTest) {
}
TregionDealloc(xid, page);
unsigned int pages[50];
pageid_t pages[50];
for(int i = 0; i < 50; i++) {
new_page = TregionAlloc(xid, 1, 0);
@ -113,7 +113,7 @@ START_TEST(regions_smokeTest) {
Tcommit(xid);
printf("\nMaximum space usage = %d, best possible = %d\n", max_page, 104); // peak storage usage = 100 pages + 1 page + 3 boundary pages.
printf("\nMaximum space usage = %lld, best possible = %d\n", max_page, 104); // peak storage usage = 100 pages + 1 page + 3 boundary pages.
Tdeinit();
}
@ -125,13 +125,13 @@ START_TEST(regions_randomizedTest) {
printf("Seed = %ld: ", seed);
srandom(seed);
int xid = Tbegin();
unsigned int pagesAlloced = 0;
unsigned int regionsAlloced = 0;
pageid_t pagesAlloced = 0;
pageid_t regionsAlloced = 0;
double max_blowup = 0;
unsigned int max_region_count = 0;
unsigned int max_waste = 0;
unsigned int max_size = 0;
unsigned int max_ideal_size = 0;
pageid_t max_region_count = 0;
pageid_t max_waste = 0;
pageid_t max_size = 0;
pageid_t max_ideal_size = 0;
for(int i = 0; i < 10000; i++) {
if(!(i % 100)) {
Tcommit(xid);
@ -148,9 +148,9 @@ START_TEST(regions_randomizedTest) {
regionsAlloced ++;
} else {
if(regionsAlloced) {
unsigned int victim = myrandom(regionsAlloced);
unsigned int victimSize;
unsigned int victimPage;
pageid_t victim = myrandom(regionsAlloced);
pageid_t victimSize;
pageid_t victimPage;
TregionFindNthActive(xid, victim, &victimPage, &victimSize);
TregionDealloc(xid, victimPage);
pagesAlloced -= victimSize;
@ -161,14 +161,14 @@ START_TEST(regions_randomizedTest) {
}
if(regionsAlloced) {
unsigned int lastRegionStart;
unsigned int lastRegionSize;
pageid_t lastRegionStart;
pageid_t lastRegionSize;
TregionFindNthActive(xid, regionsAlloced-1, &lastRegionStart, &lastRegionSize);
unsigned int length = lastRegionStart + lastRegionSize+1;
unsigned int ideal = pagesAlloced + regionsAlloced + 1;
pageid_t length = lastRegionStart + lastRegionSize+1;
pageid_t ideal = pagesAlloced + regionsAlloced + 1;
double blowup = (double)length/(double)ideal;
unsigned int bytes_wasted = length - ideal;
unsigned long long bytes_wasted = length - ideal;
// printf("Region count = %d, blowup = %d / %d = %5.2f\n", regionsAlloced, length, ideal, blowup);
if(max_blowup < blowup) {
max_blowup = blowup;
@ -197,7 +197,7 @@ START_TEST(regions_randomizedTest) {
// measure the actual and ideal page file sizes for this run.
printf("WARNING: Excessive blowup ");
}
printf("Max # of regions = %d, page file size = %5.2fM, ideal page file size = %5.2fM, (blowup = %5.2f)\n",
printf("Max # of regions = %lld, page file size = %5.2fM, ideal page file size = %5.2fM, (blowup = %5.2f)\n",
//peak bytes wasted = %5.2fM, blowup = %3.2f\n",
max_region_count,
((double)max_size * PAGE_SIZE)/(1024.0*1024.0),
@ -211,7 +211,7 @@ START_TEST(regions_randomizedTest) {
START_TEST(regions_lockSmokeTest) {
Tinit();
int xid = Tbegin();
int pageid = TregionAlloc(xid, 100,0);
pageid_t pageid = TregionAlloc(xid, 100,0);
fsckRegions(xid);
Tcommit(xid);
@ -240,7 +240,7 @@ START_TEST(regions_lockRandomizedTest) {
const int FUDGE = 10;
int xids[NUM_XACTS];
int * xidRegions[NUM_XACTS + FUDGE];
pageid_t * xidRegions[NUM_XACTS + FUDGE];
int xidRegionCounts[NUM_XACTS + FUDGE];
int longXid = Tbegin();
@ -252,13 +252,13 @@ START_TEST(regions_lockRandomizedTest) {
for(int i = 0; i < NUM_XACTS; i++) {
xids[i] = Tbegin();
assert(xids[i] < NUM_XACTS + FUDGE);
xidRegions[xids[i]] = malloc(sizeof(int) * NUM_OPS);
xidRegions[xids[i]] = malloc(sizeof(pageid_t) * NUM_OPS);
xidRegionCounts[xids[i]] = 0;
}
int activeXacts = NUM_XACTS;
for(int i = 0; i < NUM_OPS; i++) {
int j;
pageid_t j;
if(!(i % (NUM_OPS/NUM_XACTS))) {
// abort or commit one transaction randomly.
activeXacts --;
@ -288,7 +288,7 @@ START_TEST(regions_lockRandomizedTest) {
} else {
// free
if(xidRegionCounts[xids[j]]) {
int k = myrandom(xidRegionCounts[xids[j]]);
pageid_t k = myrandom(xidRegionCounts[xids[j]]);
TregionDealloc(xids[j], xidRegions[xids[j]][k]);
@ -315,7 +315,7 @@ START_TEST(regions_recoveryTest) {
Tinit();
unsigned int pages[50];
pageid_t pages[50];
int xid1 = Tbegin();
int xid2 = Tbegin();
for(int i = 0; i < 50; i+=2) {