More progress on boundary tags. (Still not tested/working yet...)
parent bf8b230bbd
commit 1ac3dc6779
6 changed files with 211 additions and 76 deletions
@@ -132,17 +132,18 @@ terms specified in this license.
 #define OPERATION_SET_RAW 33
 #define OPERATION_INSTANT_SET_RAW 34

+#define OPERATION_ALLOC_BOUNDARY_TAG 35

 // these operations are specific to OASYS
-#define OPERATION_OASYS_DIFF_DO 35
-#define OPERATION_OASYS_DIFF_REDO 36
-#define OPERATION_OASYS_DIFF_UNDO 37
-#define OPERATION_OASYS_SEMIDIFF_DO 38
-#define OPERATION_OASYS_SEMIDIFF_REDO 39
+#define OPERATION_OASYS_DIFF_DO 75
+#define OPERATION_OASYS_DIFF_REDO 76
+#define OPERATION_OASYS_DIFF_UNDO 77
+#define OPERATION_OASYS_SEMIDIFF_DO 78
+#define OPERATION_OASYS_SEMIDIFF_REDO 79

 /* number above should be less than number below */
-#define MAX_OPERATIONS 40
+#define MAX_OPERATIONS 80

 //#define SLOT_TYPE_BASE PAGE_SIZE

@@ -194,6 +195,7 @@ extern const short SLOT_TYPE_LENGTHS[];
 #define LLADD_FREE_PAGE 4
 #define FIXED_PAGE 5
 #define ARRAY_LIST_PAGE 6
+#define BOUNDARY_TAG_PAGE 7

 /* Record types */
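The first hunk makes room for the new log operation: OPERATION_ALLOC_BOUNDARY_TAG takes opcode 35, the OASYS-specific opcodes that previously sat at 35-39 move up to 75-79, and MAX_OPERATIONS doubles to 80 so that every opcode stays below it, as the comment requires. The second hunk introduces BOUNDARY_TAG_PAGE as a new page type. As an illustration of why the bound matters (the array declaration shown here is assumed, not part of this diff; only the assignment appears later in the commit):

  /* Opcodes index a fixed-size dispatch table, so each must stay below MAX_OPERATIONS. */
  Operation operationsTable[MAX_OPERATIONS];                              /* assumed declaration */
  operationsTable[OPERATION_ALLOC_BOUNDARY_TAG] = getAllocBoundaryTag();  /* from this commit    */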
@@ -9,10 +9,12 @@
 a newly allocated region are undefined.
 */

-int TregionAlloc(int xid, int pageCount);
+int TregionAlloc(int xid, int pageCount, int allocaionManager);
 void TregionFree(int xid, int firstPage);
 int TregionSize(int xid, int firstPage);

+Operation getAllocBoundaryTag();
+
 Operation getRegionAlloc();
 Operation getRegionFree();
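TregionAlloc gains an allocation-manager argument, and the header now exposes getAllocBoundaryTag(), the constructor for the new log operation. A minimal sketch of the intended calling pattern, assuming the usual Tbegin()/Tcommit() transaction wrappers and passing 0 as the allocation manager (the value the rest of this commit uses); the 100-page region size is just an example:

  int xid = Tbegin();
  int region = TregionAlloc(xid, 100, 0);  /* reserve a 100-page region */
  /* ... hand the region's pages to a page-level allocator ... */
  TregionFree(xid, region);                /* give the region back      */
  Tcommit(xid);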
@@ -1,107 +1,236 @@
 #include "../page.h"
 #include <lladd/operations.h>
-/*
+#include "../page/slotted.h"
+#include <assert.h>
 #define REGION_BASE (123)
 #define REGION_VACANT (REGION_BASE + 0)
 #define REGION_ZONED (REGION_BASE + 1)
 #define REGION_OCCUPIED (REGION_BASE + 2)
 #define REGION_CONDEMNED (REGION_BASE + 3)

+#define INVALID_XID (-1)

 #define boundary_tag_ptr(p) (((byte*)end_of_usable_space_ptr((p)))-sizeof(boundary_tag_t))

-typedef struct boundary_tag_t {
+typedef struct boundary_tag {
   int size;
   int prev_size;
   int status;
   int region_xid;
   int allocation_manager;
-} boundary_tag_t;
+} boundary_tag;

-void boundaryTagInit(Page * p) {
-  *page_type_ptr(p) = LLADD_BOUNDARY_TAG;
-  boundary_tag_t * tag = boundary_tag_ptr(p);
-  tag.size = INT32_MAX;
-  tag.prev_size = -1;
-  tag.status = REGION_VACANT;
-  tag.region_xid = INVALID_XID;
-  tag.allocation_manager = 0;
+static int operate_alloc_boundary_tag(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
+  slottedPageInitialize(p);
+  *page_type_ptr(p) = BOUNDARY_TAG_PAGE;
+  slottedPostRalloc(xid, p, lsn, rid);
+  slottedWrite(xid, p, lsn, rid, dat);
+  return 0;
 }

-regionsInit() {
-  Page * p = loadPage(0);
-  if(*page_type_ptr(p) != LLADD_BOUNDARY_TAG) {
-    assert(*page_type_ptr(p) == 0);
-    boundaryTagInit(p);
-  }
+// TODO: Implement these four functions.
+static void TallocBoundaryTag(int xid, int page, boundary_tag* tag) {
+  recordid rid = {page, 0, sizeof(boundary_tag)};
+  Tupdate(xid, rid, tag, OPERATION_ALLOC_BOUNDARY_TAG);
+}
+static void TdeallocBoundaryTag(int xid, int page) {
+  // no-op
+}

+static void TreadBoundaryTag(int xid, int page, boundary_tag* tag) {
+  recordid rid = { page, 0, sizeof(boundary_tag) };
+  Tread(xid, rid, tag);
+}
+static void TsetBoundaryTag(int xid, int page, boundary_tag* tag) {
+  recordid rid = { page, 0, sizeof(boundary_tag) };
+  Tset(xid, rid, tag);
+}

+void regionsInit() {
+  Page * p = loadPage(-1, 0);
+  int pageType = *page_type_ptr(p);
   releasePage(p);
+  if(pageType != BOUNDARY_TAG_PAGE) {
+    boundary_tag t;
+    t.size = INT32_MAX;
+    t.prev_size = INT32_MAX;
+    t.status = REGION_VACANT;
+    t.region_xid = INVALID_XID;
+    t.allocation_manager = 0;
+    TallocBoundaryTag(-1, 0, &t);
+  }
 }

 pthread_mutex_t region_mutex = PTHREAD_MUTEX_INITIALIZER;

 int TregionAlloc(int xid, int pageCount, int allocationManager) {
   // Initial implementation. Naive first fit.

   pthread_mutex_lock(&region_mutex);
-  int ret = -1;
-  Page * p = loadPage(0);
-  boundary_tag_t * t = boundary_tag_ptr(p);
-  while(t.status != REGION_VACANT || t.size < pageCount) { // XXX This while loop and the boundary tag manipulation below should be factored into two submodules.
-    int nextPage = p->id + t.size;
-    releasePage(p);
-    p = loadPage(nextPage);
-    t = boundary_tag_ptr(p);
+
+  int pageid = 0;
+  boundary_tag t;
+  int prev_size = INT32_MAX;
+
+  TreadBoundaryTag(xid, pageid, &t); // XXX need to check if there is a boundary tag there or not!
+
+  while(t.status != REGION_VACANT || t.size < pageCount) { // TODO: This while loop and the boundary tag manipulation below should be factored into two submodules.
+    prev_size = t.size;
+    pageid += ( t.size + 1 );
+    TreadBoundaryTag(xid, pageid, &t);
   }
-  t->status = REGION_ZONED;
-  t->region_xid = xid;
-  t->allocation_manager = allocationManager;
-  if(t->size != pageCount) {
+
+  t.status = REGION_ZONED;
+  t.region_xid = xid;
+  t.allocation_manager = allocationManager;
+  assert(t.prev_size = prev_size);
+  if(t.size != pageCount) {
     // need to split region

-    if(t.size != INT_MAX) {
-
-      // allocate new boundary tag.
-      int newRegionSize = t->size - pageCount - 1; // pageCount must be strictly less than t->size, so this is safe.
-      Page * new_tag = loadPage(p->id + pageCount + 1);
-      boundaryTagInit(p);
-      boundary_tag_ptr(p)->size = newRegionSize;
-      boundary_tag_ptr(p)->prev_size = pageCount;
-      boundary_tag_ptr(p)->status = REGION_EPHEMERAL; // region disappears if transaction aborts; is VACANT if it succeeds. GET RID OF EPHEMERAL; just make it vacant, and merge on abort.
-      boundary_tag_ptr(p)->region_xid = xid;
-      boundary_tag_ptr(p)->allocation_manager = 0;
-      releasePage(new_tag);
-
-      Page * next = loadPage(p->id + t.size + 1);
-      boundary_tag_ptr(next)->prev_size = newRegionSize;
-      releasePage(next);
+    // allocate new boundary tag.
+    int newPageid = pageid + pageCount + 1;
+    boundary_tag new_tag;
+
+    if(t.size != INT32_MAX) {
+
+      new_tag.size = t.size - pageCount - 1; // pageCount must be strictly less than t->size, so this is non-negative.
+
+      boundary_tag succ_tag;
+
+      TreadBoundaryTag(xid, pageid + t.size + 1, &succ_tag);
+      succ_tag.prev_size = pageCount;
+      TsetBoundaryTag(xid, pageid + t.size + 1, &succ_tag);

     } else {
-      Page * new_tag = loadPage(p->id + pageCount + 1);
-      boundaryTagInit(p);
-      boundary_tag_ptr(p)->size = INT_MAX;
-      boundary_tag_ptr(p)->prev_size = pageCount;
-      boundary_tag_ptr(p)->status = REGION_EPHEMERAL;
-      boundary_tag_ptr(p)->region_xid = xid;
-      boundary_tag_ptr(p)->allocation_manager = 0;
-    }
+
+      new_tag.size = INT32_MAX;

     }
-  releasePage(p);
+
+    new_tag.prev_size = pageCount;
+    // Create the new region, and disassociate it from this transaction immediately.
+    // This has two implications:
+    //   - It could cause some fragmentation if interleaved transactions are allocating, and some abort.
+    //   - Multiple transactions can allocate space at the end of the page file without blocking each other.
+    new_tag.status = REGION_VACANT;
+    new_tag.region_xid = INVALID_XID;
+    new_tag.allocation_manager = 0;
+
+    TallocBoundaryTag(xid, newPageid, &new_tag);
+
+  }
+
+  TsetBoundaryTag(xid, pageid, &t);
+
   pthread_mutex_unlock(&region_mutex);
+
+  return pageid;
 }

 void TregionFree(int xid, int firstPage) {

+  // Note that firstPage is the first *caller visible* page in the
+  // region. The boundary tag is stored on firstPage - 1. Also, note
+  // that a region of size N takes up N+1 pages on disk.
+
+  // Deferred coalescing would probably make sense...
+
+  pthread_mutex_lock(&region_mutex);
+
+  boundary_tag t;
+  TreadBoundaryTag(xid, firstPage - 1, &t);
+
+  // If successor is vacant, merge.
+  if(t.size != INT32_MAX) { // is there a successor?
+    int succ_page = firstPage + t.size;
+    boundary_tag succ_tag;
+    TreadBoundaryTag(xid, succ_page, &succ_tag);
+
+    // TODO: Check page_type_ptr()...
+    if(succ_tag.size == INT32_MAX) {
+      t.size = INT32_MAX;
+
+      // TODO: Truncate page file.
+      TdeallocBoundaryTag(xid, succ_page);
+
+    } else if(succ_tag.status == REGION_VACANT) {
+
+      t.size = t.size + succ_tag.size + 1;
+      int succ_succ_page = succ_page + succ_tag.size + 1;
+
+      boundary_tag succ_succ_tag;
+
+      TreadBoundaryTag(xid, succ_succ_page, &succ_succ_tag);
+      succ_succ_tag.prev_size = t.size;
+      TsetBoundaryTag(xid, succ_succ_page, &succ_succ_tag);
+
+      TsetBoundaryTag(xid, succ_page, &succ_tag);
+
+    }
+  }
+
+  // If predecessor is vacant, merge. (Doing this after the successor
+  // is merged makes life easier, since merging with the predecessor
+  // creates a situation where the current page is not a boundary
+  // tag...)
+
+  if(t.prev_size != INT32_MAX) {
+
+    int pred_page = (firstPage - 2) - t.prev_size; // If the predecessor is length zero, then it's boundary tag is two pages before this region's tag.
+
+    boundary_tag pred_tag;
+    TreadBoundaryTag(xid, pred_page, &pred_tag);
+
+    if(pred_tag.status == REGION_VACANT) {
+
+      if(t.size == INT32_MAX) {
+        pred_tag.size = INT32_MAX;
+
+        // TODO: truncate region
+
+      } else {
+
+        pred_tag.size += (t.size + 1);
+
+        int succ_page = firstPage + t.size;
+
+        boundary_tag succ_tag;
+        TreadBoundaryTag(xid, succ_page, &succ_tag);
+        succ_tag.prev_size = pred_tag.size;
+        TsetBoundaryTag(xid, succ_page, &succ_tag);
+
+        assert(succ_tag.status != REGION_VACANT);
+        assert(succ_page - pred_page == pred_tag.size);
+      }
+
+      TsetBoundaryTag(xid, pred_page, &pred_tag);
+      TdeallocBoundaryTag(xid, firstPage - 1);
+
+    } else {
+      TsetBoundaryTag(xid, firstPage - 1, &t);
+    }
+  } else {
+    TsetBoundaryTag(xid, firstPage - 1, &t);
+  }
+
+  pthread_mutex_unlock(&region_mutex);
 }

-int TregionSize(int xid, int firstPage) {
+Operation getAllocBoundaryTag() {
+  Operation o = {
+    OPERATION_ALLOC_BOUNDARY_TAG,
+    sizeof(int),
+    OPERATION_NOOP,
+    &operate_alloc_boundary_tag
+  };
+  return o;
+}

+/*Operation getAllocRegion() {

 }

-Operation getRegionAlloc() {
+Operation getFreeRegion() {

-}
+}*/

-Operation getRegionFree() {
-
-}
-*/
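Taken together, the new regions.c fixes the on-disk convention: every region is preceded by a single boundary-tag page, so a region of size N occupies N+1 pages; page 0 holds the tag of the first region, and the size/prev_size fields let the allocator walk the tag chain in either direction. A small illustration of the arithmetic the code above relies on, reusing the diff's own names (commentary only, not additional library code):

  boundary_tag t;
  TreadBoundaryTag(xid, firstPage - 1, &t);            /* this region's tag          */
  int succ_tag_page = firstPage + t.size;              /* next region's tag page     */
  int pred_tag_page = (firstPage - 2) - t.prev_size;   /* previous region's tag page */
  /* t.size == INT32_MAX marks the open-ended region at the end of the page file,
     and t.prev_size == INT32_MAX marks the very first region, so both walks stop. */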
@@ -145,7 +145,7 @@ void writeRecord(int xid, Page * p, lsn_t lsn, recordid rid, const void *dat) {

   if(rid.size > BLOB_THRESHOLD_SIZE) {
     writeBlob(xid, p, lsn, rid, dat);
-  } else if(*page_type_ptr(p) == SLOTTED_PAGE) {
+  } else if(*page_type_ptr(p) == SLOTTED_PAGE || *page_type_ptr(p) == BOUNDARY_TAG_PAGE) {
     slottedWrite(xid, p, lsn, rid, dat);
   } else if(*page_type_ptr(p) == FIXED_PAGE || *page_type_ptr(p)==ARRAY_LIST_PAGE || !*page_type_ptr(p) ) {
     fixedWrite(p, rid, dat);

@@ -163,7 +163,7 @@ int readRecord(int xid, Page * p, recordid rid, void *buf) {

   if(rid.size > BLOB_THRESHOLD_SIZE) {
     readBlob(xid, p, rid, buf);
-  } else if(page_type == SLOTTED_PAGE) {
+  } else if(page_type == SLOTTED_PAGE || page_type == BOUNDARY_TAG_PAGE) {
     slottedRead(xid, p, rid, buf);
   /* FIXED_PAGES can function correctly even if they have not been
      initialized. */

@@ -186,7 +186,7 @@ int readRecordUnlocked(int xid, Page * p, recordid rid, void *buf) {
   if(rid.size > BLOB_THRESHOLD_SIZE) {
     abort(); /* Unsupported for now. */
     readBlob(xid, p, rid, buf);
-  } else if(page_type == SLOTTED_PAGE) {
+  } else if(page_type == SLOTTED_PAGE || page_type == BOUNDARY_TAG_PAGE) {
     slottedReadUnlocked(xid, p, rid, buf);
   /* FIXED_PAGES can function correctly even if they have not been
      initialized. */

@@ -207,7 +207,7 @@ int getRecordTypeUnlocked(int xid, Page * p, recordid rid) {
   if(page_type == UNINITIALIZED_PAGE) {
     return UNINITIALIZED_RECORD;

-  } else if(page_type == SLOTTED_PAGE) {
+  } else if(page_type == SLOTTED_PAGE || page_type == BOUNDARY_TAG_PAGE) {
     if(*numslots_ptr(p) <= rid.slot || *slot_ptr(p, rid.slot) == INVALID_SLOT) {
       return UNINITIALIZED_PAGE;
     } else if (*slot_length_ptr(p, rid.slot) == BLOB_SLOT) {

@@ -258,7 +258,7 @@ void writeRecordUnlocked(int xid, Page * p, lsn_t lsn, recordid rid, const void
   if(rid.size > BLOB_THRESHOLD_SIZE) {
     abort();
     writeBlob(xid, p, lsn, rid, dat);
-  } else if(*page_type_ptr(p) == SLOTTED_PAGE) {
+  } else if(*page_type_ptr(p) == SLOTTED_PAGE || *page_type_ptr(p) == BOUNDARY_TAG_PAGE) {
     slottedWriteUnlocked(xid, p, lsn, rid, dat);
   } else if(*page_type_ptr(p) == FIXED_PAGE || *page_type_ptr(p)==ARRAY_LIST_PAGE || !*page_type_ptr(p) ) {
     fixedWriteUnlocked(p, rid, dat);
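These page.c hunks all make the same change: a boundary tag page is physically a slotted page that carries its own type tag, so every dispatch on the page type now treats BOUNDARY_TAG_PAGE exactly like SLOTTED_PAGE. Restated as a sketch (the real code keeps the if/else chains shown above):

  int type = *page_type_ptr(p);
  if(type == SLOTTED_PAGE || type == BOUNDARY_TAG_PAGE) {
    /* Boundary tag pages reuse the slotted record layout, so the slotted
       read/write paths handle them unchanged. */
    slottedRead(xid, p, rid, buf);
  }

Keeping each tag as record 0 of a slotted page is what lets regions.c manage boundary tags through the ordinary Tread/Tset/Tupdate path instead of writing to pages directly.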
@@ -191,7 +191,7 @@ recordid slottedRawRalloc(Page * page, int size) {
   assert(type != INVALID_SLOT);

   writelock(page->rwlatch, 342);
-  assert(*page_type_ptr(page) == SLOTTED_PAGE);
+  assert(*page_type_ptr(page) == SLOTTED_PAGE || *page_type_ptr(page) == BOUNDARY_TAG_PAGE);

   recordid rid;

@@ -345,7 +345,7 @@ recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
     rid.size = BLOB_SLOT;
   }

-  if(*page_type_ptr(page) != SLOTTED_PAGE) {
+  if(*page_type_ptr(page) != SLOTTED_PAGE && *page_type_ptr(page) != BOUNDARY_TAG_PAGE) {
     /* slottedPreRalloc calls this when necessary. However, in
        the case of a crash, it is possible that
        slottedPreRalloc's updates were lost, so we need to check
@@ -82,6 +82,8 @@ void setupOperationsTable() {
   operationsTable[OPERATION_SET_RAW] = getSetRaw();
   operationsTable[OPERATION_INSTANT_SET_RAW] = getInstantSetRaw();

+  operationsTable[OPERATION_ALLOC_BOUNDARY_TAG] = getAllocBoundaryTag();
+
   /*
   int i;

@@ -230,7 +232,7 @@ void TreadUnlocked(int xid, recordid rid, void * dat) {
     p = loadPage(xid, rid.page);
   } end;
   int page_type = *page_type_ptr(p);
-  if(page_type == SLOTTED_PAGE || page_type == FIXED_PAGE || !page_type ) {
+  if(page_type == SLOTTED_PAGE || page_type == FIXED_PAGE || (!page_type) || page_type == BOUNDARY_TAG_PAGE ) {

   } else if(page_type == INDIRECT_PAGE) {
     releasePage(p);

@@ -259,7 +261,7 @@ compensated_function void Tread(int xid, recordid rid, void * dat) {
     p = loadPage(xid, rid.page);
   } end;
   int page_type = *page_type_ptr(p);
-  if(page_type == SLOTTED_PAGE || page_type == FIXED_PAGE || !page_type ) {
+  if(page_type == SLOTTED_PAGE || page_type == FIXED_PAGE || (!page_type) || page_type == BOUNDARY_TAG_PAGE ) {

   } else if(page_type == INDIRECT_PAGE) {
     releasePage(p);