Move page type to the Page_s struct. This breaks indirect.c (which is poorly thought out and will be removed shortly), and is the next step on the road to segment-based recovery.

This commit is contained in:
Sears Russell 2009-07-07 19:20:22 +00:00
parent b66328192c
commit 2593de7a2c
24 changed files with 219 additions and 228 deletions

View file

@ -7,3 +7,4 @@ param
callbacks
mutex
todo
writeback

View file

@ -51,7 +51,7 @@ void stasis_blob_write(int xid, Page * p, recordid rid, const void* dat) {
for(; (chunk+1) * USABLE_SIZE_OF_PAGE < rid.size; chunk++) {
Page * cnk = loadPage(xid, rec.offset+chunk);
writelock(cnk->rwlatch,0);
if(*stasis_page_type_ptr(cnk) != BLOB_PAGE) {
if(cnk->pageType != BLOB_PAGE) {
stasis_page_blob_initialize(cnk);
}
unlock(cnk->rwlatch);
@ -61,7 +61,7 @@ void stasis_blob_write(int xid, Page * p, recordid rid, const void* dat) {
}
Page * cnk = loadPage(xid, rec.offset+chunk);
writelock(cnk->rwlatch,0);
if(*stasis_page_type_ptr(cnk) != BLOB_PAGE) {
if(p->pageType != BLOB_PAGE) {
stasis_page_blob_initialize(cnk);
}
unlock(cnk->rwlatch);
@ -86,6 +86,7 @@ static void stasis_page_blob_cleanup(Page *p) { }
page_impl stasis_page_blob_impl() {
page_impl pi = {
BLOB_PAGE,
1,
0, //read,
0, //write,
0, //readDone
@ -115,5 +116,5 @@ void stasis_page_blob_initialize(Page * p) {
assertlocked(p->rwlatch);
DEBUG("lsn: %lld\n",(long long)p->LSN);
stasis_page_cleanup(p);
*stasis_page_type_ptr(p) = BLOB_PAGE;
p->pageType = BLOB_PAGE;
}

View file

@ -283,7 +283,7 @@ static Page * bhLoadPageImpl_helper(int xid, const pageid_t pageid, int uninitia
memset(ret->memAddr,0,PAGE_SIZE);
*stasis_page_lsn_ptr(ret) = ret->LSN;
ret->dirty = 0;
stasis_page_loaded(ret);
stasis_page_loaded(ret, UNKNOWN_TYPE_PAGE);
}
*pagePendingPtr(ret) = 0;
// Would remove from lru, but getFreePage() guarantees that it isn't

View file

@ -284,7 +284,7 @@ static Page* bufManGetPage(pageid_t pageid, int locktype, int uninitialized) {
*stasis_page_lsn_ptr(ret) = ret->LSN;
// XXX need mutex for this call?
stasis_page_loaded(ret);
stasis_page_loaded(ret, UNKNOWN_TYPE_PAGE);
}
writeunlock(ret->loadlatch);

View file

@ -71,7 +71,7 @@ static void pfPageRead(stasis_page_handle_t * h, Page *ret) {
}
ret->dirty = 0;
stasis_page_loaded(ret);
stasis_page_loaded(ret, UNKNOWN_TYPE_PAGE);
pthread_mutex_unlock(&stable_mutex);

View file

@ -223,7 +223,7 @@ static void stasis_alloc_register_old_regions() {
for(pageid_t i = 0; i < t.size; i++) {
Page * p = loadPage(-1, boundary + i);
readlock(p->rwlatch,0);
if(*stasis_page_type_ptr(p) == SLOTTED_PAGE) {
if(p->pageType == SLOTTED_PAGE) {
availablePage * next = malloc(sizeof(availablePage));
next->pageid = boundary+i;
next->freespace = stasis_record_freespace(-1, p);

View file

@ -91,7 +91,7 @@ static int array_list_op_init_header(const LogEntry* e, Page* p) {
stasis_record_write(e->xid, p, firstDataPageRid, (const byte*)&firstDataPage);
*stasis_page_type_ptr(p) = ARRAY_LIST_PAGE;
p->pageType = ARRAY_LIST_PAGE;
return 0;
}

View file

@ -278,7 +278,7 @@ recordid TlsmCreate(int xid, int comparator,
Page *p = loadPage(xid, ret.page);
writelock(p->rwlatch,0);
stasis_fixed_initialize_page(p, sizeof(lsmTreeNodeRecord) + keySize, 0);
*stasis_page_type_ptr(p) = LSM_ROOT_PAGE;
p->pageType = LSM_ROOT_PAGE;
lsmTreeState *state = malloc(sizeof(lsmTreeState));
state->lastLeaf = -1; /// XXX define something in constants.h?
@ -403,8 +403,7 @@ static recordid appendInternalNode(int xid, Page *p,
pageid_t val_page, pageid_t lastLeaf,
lsm_page_allocator_t allocator,
void *allocator_state) {
assert(*stasis_page_type_ptr(p) == LSM_ROOT_PAGE ||
*stasis_page_type_ptr(p) == FIXED_PAGE);
assert(p->pageType == LSM_ROOT_PAGE || p->pageType == FIXED_PAGE);
if(!depth) {
// leaf node.
recordid ret = stasis_record_alloc_begin(xid, p, sizeof(lsmTreeNodeRecord)+key_len);
@ -720,7 +719,7 @@ pageid_t TlsmLastPage(int xid, recordid tree) {
}
Page * root = loadPage(xid, tree.page);
readlock(root->rwlatch,0);
assert(*stasis_page_type_ptr(root) == LSM_ROOT_PAGE);
assert(root->pageType == LSM_ROOT_PAGE);
lsmTreeState *state = root->impl;
int keySize = getKeySize(xid,root);
if(state->lastLeaf == -1) {

View file

@ -3,7 +3,7 @@
#include <stasis/logger/reorderingHandle.h>
#include <string.h>
static int op_lsn_free_set(const LogEntry *e, Page *p) {
if(*stasis_page_type_ptr(p) != SLOTTED_LSN_FREE_PAGE) { abort() ; }
if(p->pageType != SLOTTED_LSN_FREE_PAGE) { abort() ; }
assert(e->update.arg_size >= (sizeof(pageoff_t) * 2));
int size = e->update.arg_size;
size -= (2*sizeof(pageoff_t));
@ -14,7 +14,7 @@ static int op_lsn_free_set(const LogEntry *e, Page *p) {
return 0;
}
static int op_lsn_free_unset(const LogEntry *e, Page *p) {
if(*stasis_page_type_ptr(p) != SLOTTED_LSN_FREE_PAGE) { return 0; }
if(p->pageType != SLOTTED_LSN_FREE_PAGE) { return 0; }
assert(e->update.arg_size >= (sizeof(pageoff_t) * 2));
int size = e->update.arg_size;
size -= (2*sizeof(pageoff_t));

View file

@ -120,7 +120,7 @@ compensated_function pageid_t TpageAllocMany(int xid, int count) {
int TpageGetType(int xid, pageid_t page) {
Page * p = loadPage(xid, page);
int ret = *stasis_page_type_ptr(p);
int ret = p->pageType;
releasePage(p);
return ret;
}

View file

@ -26,7 +26,7 @@ static int op_alloc_boundary_tag(const LogEntry* e, Page* p) {
stasis_slotted_initialize_page(p);
recordid rid = {p->id, 0, sizeof(boundary_tag)};
assert(e->update.arg_size == sizeof(boundary_tag));
*stasis_page_type_ptr(p) = BOUNDARY_TAG_PAGE;
p->pageType = BOUNDARY_TAG_PAGE;
stasis_record_alloc_done(e->xid, p, rid);
byte * buf = stasis_record_write_begin(e->xid, p, rid);
memcpy(buf, getUpdateArgs(e), stasis_record_length_read(e->xid, p, rid));
@ -135,10 +135,9 @@ static void TdeallocBoundaryTag(int xid, pageid_t page) {
void regionsInit() {
Page * p = loadPage(-1, 0);
int pageType = *stasis_page_type_ptr(p);
holding_mutex = pthread_self();
if(pageType != BOUNDARY_TAG_PAGE) {
if(p->pageType != BOUNDARY_TAG_PAGE) {
boundary_tag t;
t.size = PAGEID_T_MAX;
t.prev_size = PAGEID_T_MAX;

View file

@ -180,7 +180,7 @@ int stasis_record_read(int xid, Page * p, recordid rid, byte *buf) {
recordid stasis_record_dereference(int xid, Page * p, recordid rid) {
assertlocked(p->rwlatch);
int page_type = *stasis_page_type_ptr(p);
int page_type = p->pageType;
if(page_type == INDIRECT_PAGE) {
rid = dereferenceIndirectRID(xid, rid);
} else if(page_type == ARRAY_LIST_PAGE) {
@ -194,90 +194,90 @@ recordid stasis_record_dereference(int xid, Page * p, recordid rid) {
const byte * stasis_record_read_begin(int xid, Page * p, recordid rid) {
assertlocked(p->rwlatch);
int page_type = *stasis_page_type_ptr(p);
int page_type = p->pageType;
assert(page_type);
return page_impls[page_type].recordRead(xid, p, rid);
}
byte * stasis_record_write_begin(int xid, Page * p, recordid rid) {
assertlocked(p->rwlatch);
int page_type = *stasis_page_type_ptr(p);
int page_type = p->pageType;
assert(page_type);
assert(stasis_record_length_read(xid, p, rid) == stasis_record_type_to_size(rid.size));
return page_impls[page_type].recordWrite(xid, p, rid);
}
void stasis_record_read_done(int xid, Page *p, recordid rid, const byte *b) {
int page_type = *stasis_page_type_ptr(p);
int page_type = p->pageType;
if(page_impls[page_type].recordReadDone) {
page_impls[page_type].recordReadDone(xid,p,rid,b);
}
}
void stasis_record_write_done(int xid, Page *p, recordid rid, byte *b) {
int page_type = *stasis_page_type_ptr(p);
int page_type = p->pageType;
if(page_impls[page_type].recordWriteDone) {
page_impls[page_type].recordWriteDone(xid,p,rid,b);
}
}
int stasis_record_type_read(int xid, Page *p, recordid rid) {
assertlocked(p->rwlatch);
if(page_impls[*stasis_page_type_ptr(p)].recordGetType)
return page_impls[*stasis_page_type_ptr(p)].recordGetType(xid, p, rid);
if(page_impls[p->pageType].recordGetType)
return page_impls[p->pageType].recordGetType(xid, p, rid);
else
return INVALID_SLOT;
}
void stasis_record_type_write(int xid, Page *p, recordid rid, int type) {
assertlocked(p->rwlatch);
page_impls[*stasis_page_type_ptr(p)]
page_impls[p->pageType]
.recordSetType(xid, p, rid, type);
}
int stasis_record_length_read(int xid, Page *p, recordid rid) {
assertlocked(p->rwlatch);
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.recordGetLength(xid,p,rid);
}
recordid stasis_record_first(int xid, Page * p){
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.recordFirst(xid,p);
}
recordid stasis_record_next(int xid, Page * p, recordid prev){
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.recordNext(xid,p,prev);
}
recordid stasis_record_alloc_begin(int xid, Page * p, int size){
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.recordPreAlloc(xid,p,size);
}
void stasis_record_alloc_done(int xid, Page * p, recordid rid){
page_impls[*stasis_page_type_ptr(p)]
page_impls[p->pageType]
.recordPostAlloc(xid, p, rid);
}
void stasis_record_free(int xid, Page * p, recordid rid){
page_impls[*stasis_page_type_ptr(p)]
page_impls[p->pageType]
.recordFree(xid, p, rid);
}
int stasis_block_supported(int xid, Page * p){
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.isBlockSupported(xid, p);
}
block_t * stasis_block_first(int xid, Page * p){
int t = *stasis_page_type_ptr(p);
int t = p->pageType;
return page_impls[t]
.blockFirst(xid, p);
}
block_t * stasis_block_next(int xid, Page * p, block_t * prev){
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.blockNext(xid, p,prev);
}
void stasis_block_done(int xid, Page * p, block_t * done){
page_impls[*stasis_page_type_ptr(p)]
page_impls[p->pageType]
.blockDone(xid, p,done);
}
int stasis_record_freespace(int xid, Page * p){
return page_impls[*stasis_page_type_ptr(p)]
return page_impls[p->pageType]
.pageFreespace(xid, p);
}
void stasis_record_compact(Page * p){
page_impls[*stasis_page_type_ptr(p)]
page_impls[p->pageType]
.pageCompact(p);
}
/** @todo How should the LSN of pages without a page_type be handled?
@ -286,26 +286,32 @@ void stasis_record_compact(Page * p){
LSN-free pages, we'll need special "loadPageForAlloc(), and
loadPageOfType() methods (or something...)
*/
void stasis_page_loaded(Page * p){
short type = *stasis_page_type_ptr(p);
if(type) {
assert(page_impls[type].page_type == type);
page_impls[type].pageLoaded(p);
void stasis_page_loaded(Page * p, pagetype_t type){
p->pageType = (type == UNKNOWN_TYPE_PAGE) ? *stasis_page_type_ptr(p) : type;
if(p->pageType) {
assert(page_impls[p->pageType].page_type == p->pageType);
page_impls[p->pageType].pageLoaded(p);
} else {
p->LSN = *stasis_page_lsn_ptr(p); // XXX kludge - shouldn't special-case UNINITIALIZED_PAGE
}
}
void stasis_page_flushed(Page * p){
short type = *stasis_page_type_ptr(p);
pagetype_t type = p->pageType;
if(type) {
assert(page_impls[type].page_type == type);
if(page_impls[type].has_header) {
*stasis_page_type_ptr(p)= type;
*stasis_page_lsn_ptr(p) = p->LSN;
}
page_impls[type].pageFlushed(p);
} else {
*stasis_page_type_ptr(p)= type;
*stasis_page_lsn_ptr(p) = p->LSN;
}
}
void stasis_page_cleanup(Page * p) {
short type = *stasis_page_type_ptr(p);
short type = p->pageType;
if(type) {
assert(page_impls[type].page_type == type);
page_impls[type].pageCleanup(p);

View file

@ -13,7 +13,7 @@ int stasis_fixed_records_per_page(size_t size) {
void stasis_fixed_initialize_page(Page * page, size_t size, int count) {
assertlocked(page->rwlatch);
stasis_page_cleanup(page);
*stasis_page_type_ptr(page) = FIXED_PAGE;
page->pageType = FIXED_PAGE;
*recordsize_ptr(page) = size;
assert(count <= stasis_fixed_records_per_page(size));
*recordcount_ptr(page)= count;
@ -21,7 +21,7 @@ void stasis_fixed_initialize_page(Page * page, size_t size, int count) {
static void checkRid(Page * page, recordid rid) {
assertlocked(page->rwlatch);
assert(*stasis_page_type_ptr(page)); // any more specific breaks pages based on this one
assert(page->pageType); // any more specific breaks pages based on this one
assert(page->id == rid.page);
assert(*recordsize_ptr(page) == rid.size);
assert(stasis_fixed_records_per_page(rid.size) > rid.slot);
@ -65,7 +65,7 @@ static void fixedSetType(int xid, Page *p, recordid rid, int type) {
}
static int fixedGetLength(int xid, Page *p, recordid rid) {
assertlocked(p->rwlatch);
assert(*stasis_page_type_ptr(p));
assert(p->pageType);
return rid.slot > *recordcount_ptr(p) ?
INVALID_SLOT : stasis_record_type_to_size(*recordsize_ptr(p));
}
@ -124,6 +124,7 @@ void fixedCleanup(Page *p) { }
page_impl fixedImpl() {
static page_impl pi = {
FIXED_PAGE,
1,
fixedRead,
fixedWrite,
0,// readDone

View file

@ -11,7 +11,7 @@
void indirectInitialize(Page * p, int height) {
stasis_page_cleanup(p);
*level_ptr(p) = height;
*stasis_page_type_ptr(p) = INDIRECT_PAGE;
p->pageType = INDIRECT_PAGE;
memset(p->memAddr, INVALID_SLOT, ((size_t)level_ptr(p)) - ((size_t)p->memAddr));
}
/** @todo Is locking for dereferenceRID really necessary? */
@ -24,7 +24,7 @@ compensated_function recordid dereferenceIndirectRID(int xid, recordid rid) {
// printf("a"); fflush(stdout);
int offset = 0;
int max_slot;
while(*stasis_page_type_ptr(page) == INDIRECT_PAGE) {
while(page->pageType == INDIRECT_PAGE) {
int i = 0;
for(max_slot = *maxslot_ptr(page, i); ( max_slot + offset ) <= rid.slot; max_slot = *maxslot_ptr(page, i)) {
i++;
@ -78,7 +78,7 @@ compensated_function recordid rallocMany(int xid, int recordSize, int recordCoun
recordid ret;
int page;
try_ret(NULLRID) {
page = TpageAlloc(xid/*, SLOTTED_PAGE*/);
page = TpageAlloc(xid);
}end_ret(NULLRID);
try_ret(NULLRID) {
ret = __rallocMany(xid, page, recordSize, recordCount);
@ -111,7 +111,7 @@ compensated_function recordid __rallocMany(int xid, pageid_t parentPage, int rec
p.memAddr = buffer;
p.rwlatch = initlock();
p.loadlatch = initlock();
*stasis_page_type_ptr(&p) = UNINITIALIZED_PAGE;
p.pageType = UNINITIALIZED_PAGE;
if(number_of_pages > 1) {
@ -206,7 +206,7 @@ compensated_function int indirectPageRecordCount(int xid, recordid rid) {
readlock(p->rwlatch, 0);
int i = 0;
unsigned int ret;
if(*stasis_page_type_ptr(p) == INDIRECT_PAGE) {
if(p->pageType == INDIRECT_PAGE) {
while(*maxslot_ptr(p, i) > 0) {
i++;
@ -216,7 +216,7 @@ compensated_function int indirectPageRecordCount(int xid, recordid rid) {
} else {
ret = (*maxslot_ptr(p, i-1)) - 1;
}
} else if (*stasis_page_type_ptr(p) == SLOTTED_PAGE) {
} else if (p->pageType == SLOTTED_PAGE) {
int numslots = *numslots_ptr(p);
ret = 0;
@ -247,6 +247,7 @@ void indirectFlushed(Page *p) {
void indirectCleanup(Page *p) { }
static page_impl pi = {
INDIRECT_PAGE,
1,
0, //read,
0, //write,
0, //readDone

View file

@ -1,12 +1,10 @@
#include <stasis/page.h>
#include <stasis/page/indirect.h>
#include <stasis/page/slotted.h>
#include <stasis/logger/logger2.h>
void stasis_slotted_lsn_free_initialize_page(Page * p) {
stasis_slotted_initialize_page(p);
*stasis_page_type_ptr(p) = SLOTTED_LSN_FREE_PAGE;
*stasis_page_lsn_ptr(p) = -1;
p->pageType = SLOTTED_LSN_FREE_PAGE;
}
// XXX still not correct; need to have an "LSN_FREE" constant.
static void lsnFreeLoaded(Page * p) {

View file

@ -21,7 +21,7 @@ static inline void slottedFsck(const Page const * page) {
dummy.id = -1;
dummy.memAddr = 0;
const short page_type = *stasis_page_type_cptr(page);
const short page_type = page->pageType;
const short numslots = *numslots_cptr(page);
const short freespace = *freespace_cptr(page);
const short freelist = *freelist_cptr(page);
@ -203,7 +203,7 @@ void slottedPageDeinit() {
void stasis_slotted_initialize_page(Page * page) {
assertlocked(page->rwlatch);
stasis_page_cleanup(page);
*stasis_page_type_ptr(page) = SLOTTED_PAGE;
page->pageType = SLOTTED_PAGE;
*freespace_ptr(page) = 0;
*numslots_ptr(page) = 0;
*freelist_ptr(page) = INVALID_SLOT;
@ -526,6 +526,7 @@ static void slottedCleanup(Page *p) { }
page_impl slottedImpl() {
static page_impl pi = {
SLOTTED_PAGE,
1,
slottedRead,
slottedWrite,
0,// readDone

View file

@ -40,7 +40,7 @@ static void phRead(stasis_page_handle_t * ph, Page * ret) {
}
}
ret->dirty = 0;
stasis_page_loaded(ret);
stasis_page_loaded(ret, UNKNOWN_TYPE_PAGE);
unlock(ret->rwlatch);
}
static void phForce(stasis_page_handle_t * ph) {

View file

@ -110,6 +110,9 @@ typedef int32_t slotid_t;
typedef uint16_t pageoff_t;
#define PAGEOFF_T_MAX UINT16_MAX;
typedef int16_t pagetype_t;
#define PAGETYPE_T_MAX INT16_MAX;
/*#define DEBUGGING */
/*#define PROFILE_LATCHES*/
/*#define NO_LATCHES */

View file

@ -227,7 +227,7 @@ static const short SLOT_TYPE_LENGTHS[] = { -1, -1, sizeof(blob_record_t), -1};
#define XPREPARE 8
/* Page types */
#define UNKNOWN_TYPE_PAGE (-1)
#define UNINITIALIZED_PAGE 0
#define SLOTTED_PAGE 1
#define INDIRECT_PAGE 2

View file

@ -123,6 +123,16 @@ BEGIN_C_DECLS
*/
struct Page_s {
pageid_t id;
/**
* The type of this page. Set when the page is loaded from disk. If the page contains
* a header, this will be set automatically. Otherwise, it must be passed in by the code
* that pinned the page.
*/
int pageType;
/**
* The LSN of the page (or an estimate). Set when page is loaded from disk.
* The on-page LSN (if any) is set at page writeback.
*/
lsn_t LSN;
byte *memAddr;
byte dirty;
@ -540,7 +550,7 @@ void stasis_record_free(int xid, Page * p, recordid rid);
int stasis_block_supported(int xid, Page * p);
int stasis_record_freespace(int xid, Page * p);
void stasis_record_compact(Page * p);
void stasis_page_loaded(Page * p);
void stasis_page_loaded(Page * p, pagetype_t type);
void stasis_page_flushed(Page * p);
void stasis_page_cleanup(Page * p);
/**
@ -677,7 +687,7 @@ void stasis_block_done(int xid, Page * p, block_t * done);
*/
typedef struct page_impl {
int page_type;
int has_header;
// ---------- Record access
/**

View file

@ -235,7 +235,7 @@ int SimpleExample(){
/* check to make sure page is recorded as a FIXED_PAGE */
assert( *stasis_page_type_ptr(p1) == FIXED_PAGE);
assert( p1->pageType == FIXED_PAGE);
if (DEBUGP) { printf("\n%lld\n", (long long)pageid1); }
byte * b1 = (byte *) malloc (sizeof (int));

View file

@ -102,11 +102,7 @@ START_TEST(indirectAlloc) {
Page * p = loadPage(xid, page);
int page_type = *stasis_page_type_ptr(p);
assert(page_type == SLOTTED_PAGE);
fail_unless(page_type == SLOTTED_PAGE, NULL);
assert(p->pageType == SLOTTED_PAGE);
releasePage(p);
@ -123,13 +119,7 @@ START_TEST(indirectAlloc) {
p = loadPage(xid, page);
page_type = *stasis_page_type_ptr(p);
assert(page_type == INDIRECT_PAGE);
fail_unless(page_type == INDIRECT_PAGE, NULL);
assert(p->pageType == INDIRECT_PAGE);
printf("{page = %lld, slot = %d, size = %lld}\n", (pageid_t)rid.page, rid.slot, (pageid_t)rid.size);
@ -147,13 +137,7 @@ START_TEST(indirectAlloc) {
p = loadPage(xid, page);
page_type = *stasis_page_type_ptr(p);
assert(page_type == INDIRECT_PAGE);
fail_unless(page_type == INDIRECT_PAGE, NULL);
assert(p->pageType == INDIRECT_PAGE);
printf("{page = %lld, slot = %d, size = %lld}\n", (pageid_t)rid.page, rid.slot, (pageid_t)rid.size);
@ -178,11 +162,7 @@ START_TEST(indirectAccessDirect) {
Page * p = loadPage(xid, page);
int page_type = *stasis_page_type_ptr(p);
assert(page_type == SLOTTED_PAGE);
fail_unless(page_type == SLOTTED_PAGE, NULL);
assert(p->pageType == SLOTTED_PAGE);
releasePage(p);
@ -223,11 +203,7 @@ START_TEST(indirectAccessIndirect) {
Page * p = loadPage(xid, page);
int page_type = *stasis_page_type_ptr(p);
assert(page_type == INDIRECT_PAGE);
fail_unless(page_type == INDIRECT_PAGE, NULL);
assert(p->pageType == INDIRECT_PAGE);
Tcommit(xid);
xid = Tbegin();

View file

@ -323,7 +323,7 @@ static void checkPageIterators(int xid, Page *p,int record_count) {
assertRecordCountSizeType(xid, p, record_count, sizeof(int64_t), NORMAL_SLOT);
if(*stasis_page_type_ptr(p) == SLOTTED_PAGE) {
if(p->pageType == SLOTTED_PAGE) {
recordid other = first;
other.slot = 3;
stasis_record_free(xid,p,other);

View file

@ -77,12 +77,11 @@ START_TEST(pageOpCheckRecovery) {
memset(p.memAddr, 1, USABLE_SIZE_OF_PAGE);
// Reset the page type after overwriting it with memset. Otherwise, Stasis
// will try to interpret it when it flushes the page to disk.
*stasis_page_type_ptr(&p) = 0;
p.pageType = 0;
TpageSetRange(xid, pageid1, 0, p.memAddr, USABLE_SIZE_OF_PAGE);
memset(p.memAddr, 2, USABLE_SIZE_OF_PAGE);
*stasis_page_type_ptr(&p) = 0;
TpageSetRange(xid, pageid2, 0, p.memAddr, USABLE_SIZE_OF_PAGE);
@ -121,23 +120,19 @@ START_TEST(pageOpCheckRecovery) {
assert(pageid2 != pageid3);
memset(p.memAddr, 3, USABLE_SIZE_OF_PAGE);
*stasis_page_type_ptr(&p) = 0;
TpageSetRange(xid, pageid3, 0, p.memAddr, USABLE_SIZE_OF_PAGE);
byte newAddr[USABLE_SIZE_OF_PAGE];
memset(p.memAddr, 1, USABLE_SIZE_OF_PAGE);
*stasis_page_type_ptr(&p) = 0;
TpageGet(xid, pageid1, newAddr);
assert(!memcmp(p.memAddr, newAddr, USABLE_SIZE_OF_PAGE));
memset(p.memAddr, 2, USABLE_SIZE_OF_PAGE);
*stasis_page_type_ptr(&p) = 0;
TpageGet(xid, pageid2, newAddr);
assert(!memcmp(p.memAddr, newAddr, USABLE_SIZE_OF_PAGE));
memset(p.memAddr, 3, USABLE_SIZE_OF_PAGE);
*stasis_page_type_ptr(&p) = 0;
TpageGet(xid, pageid3, newAddr);
assert(!memcmp(p.memAddr, newAddr, USABLE_SIZE_OF_PAGE));
Tcommit(xid);