switched to region allocator, but TpageAlloc currently grabs its own region...

This commit is contained in:
Sears Russell 2006-07-20 01:29:39 +00:00
parent 3bd79f4e24
commit ffd3bd960b
11 changed files with 165 additions and 486 deletions

View file

@@ -84,7 +84,8 @@ terms specified in this license.
/*#define MAX_BUFFER_SIZE 20029 */
//#define MAX_BUFFER_SIZE 10007
//#define MAX_BUFFER_SIZE 5003
#define MAX_BUFFER_SIZE 2003
//#define MAX_BUFFER_SIZE 2003
#define MAX_BUFFER_SIZE 4006
/* #define MAX_BUFFER_SIZE 71 */
/*#define MAX_BUFFER_SIZE 7 */
@@ -143,6 +144,9 @@ terms specified in this license.
#define OPERATION_OASYS_SEMIDIFF_DO 78
#define OPERATION_OASYS_SEMIDIFF_REDO 79
#define STORAGE_MANAGER_NAIVE_PAGE_ALLOC 1
/* number above should be less than number below */
#define MAX_OPERATIONS 80

View file

@@ -9,9 +9,28 @@
a newly allocated region are undefined.
*/
int TregionAlloc(int xid, int pageCount, int allocationManager);
void TregionFree(int xid, int firstPage);
int TregionSize(int xid, int firstPage);
typedef struct boundary_tag {
unsigned int size;
unsigned int prev_size;
int status;
int region_xid;
int allocation_manager;
} boundary_tag;
#define REGION_BASE (123)
#define REGION_VACANT (REGION_BASE + 0)
#define REGION_ZONED (REGION_BASE + 1)
#define REGION_OCCUPIED (REGION_BASE + 2)
#define REGION_CONDEMNED (REGION_BASE + 3)
void regionsInit();
unsigned int TregionAlloc(int xid, unsigned int pageCount, int allocationManager);
void TregionDealloc(int xid, unsigned int firstPage);
unsigned int TregionSize(int xid, unsigned int firstPage);
/** Currently, this function is O(n) in the number of regions, so be careful! */
void TregionFindNthActive(int xid, unsigned int n, unsigned int * firstPage, unsigned int * size);
Operation getAllocBoundaryTag();
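A minimal usage sketch of the new API declared above (assuming LLADD's Tbegin/Tcommit and the lladd/transactional.h header from the rest of the tree; error handling omitted):

#include <assert.h>
#include <lladd/transactional.h>

void region_example(void) {
  int xid = Tbegin();
  /* Allocate a 10-page region. The returned id is the first
     caller-visible page; its boundary tag sits on the page before it. */
  unsigned int first = TregionAlloc(xid, 10, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
  assert(TregionSize(xid, first) == 10); /* assumed to return the page count */
  unsigned int firstPage, size;
  TregionFindNthActive(xid, 0, &firstPage, &size); /* O(n), per the warning above */
  TregionDealloc(xid, first); /* tag becomes REGION_VACANT; neighbors may merge */
  Tcommit(xid);
}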

View file

@@ -144,69 +144,6 @@ void TallocInit() {
lastFreepage = UINT64_MAX;
}
/*compensated_function recordid TallocOld(int xid, long size) {
recordid rid;
// @todo How should blobs be handled? Should Talloc do it? If not,
// it's hard for apps to use it... Similarly, with hints, Talloc
// may need to route certain sizes to certain types of pages (e.g.,
// small sizes go to fixed page implementations...)
int isBlob = size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT;
if(isBlob) {
try_ret(NULLRID) {
rid = preAllocBlob(xid, size);
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
} end_ret(NULLRID);
} else {
Page * p = NULL;
begin_action_ret(pthread_mutex_unlock, &talloc_mutex, NULLRID) {
pthread_mutex_lock(&talloc_mutex);
if(lastFreepage == UINT64_MAX) {
try_ret(NULLRID) {
lastFreepage = TpageAlloc(xid);
} end_ret(NULLRID);
try_ret(NULLRID) {
p = loadPage(xid, lastFreepage);
} end_ret(NULLRID);
assert(*page_type_ptr(p) == UNINITIALIZED_PAGE);
slottedPageInitialize(p);
} else {
try_ret(NULLRID) {
p = loadPage(xid, lastFreepage);
} end_ret(NULLRID);
}
if(slottedFreespace(p) < size ) {
releasePage(p);
try_ret(NULLRID) {
lastFreepage = TpageAlloc(xid);
} end_ret(NULLRID);
try_ret(NULLRID) {
p = loadPage(xid, lastFreepage);
} end_ret(NULLRID);
slottedPageInitialize(p);
}
rid = slottedRawRalloc(p, size); // <--- Important part.
Tupdate(xid, rid, NULL, OPERATION_ALLOC); // <--- This hardcodes "slotted" Should we use TallocFromPage() instead?
// @todo does releasePage do the correct error checking? <- Why is this comment here?
releasePage(p);
} compensate_ret(NULLRID);
}
return rid;
}*/
static compensated_function recordid TallocFromPageInternal(int xid, Page * p, unsigned long size);
compensated_function recordid Talloc(int xid, unsigned long size) {

View file

@@ -10,110 +10,14 @@
#include "../page/fixed.h"
#include <alloca.h>
#ifdef REUSE_PAGES
static int freelist;
#endif
static int freepage;
static pthread_mutex_t pageAllocMutex;
/*int __pageAlloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
int type = *(int*)d;
*page_type_ptr(p) = type;
/ ** @todo this sort of thing should be done in a centralized way. * /
if(type == SLOTTED_PAGE) {
slottedPageInitialize(p);
}
return 0;
}
int __pageDealloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
*page_type_ptr(p) = UNINITIALIZED_PAGE;
return 0;
}
*/
int __pageSet(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
memcpy(p->memAddr, d, PAGE_SIZE);
pageWriteLSN(xid, p, lsn);
return 0;
}
typedef struct {
int before;
int after;
} update_tuple;
int __update_freepage(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
assert(r.page == 0);
const update_tuple * t = d;
/* printf("freepage %d -> %d\n", t->before, t->after);
fflush(NULL); */
* headerFreepage_ptr(p) = t->after;
freepage = t->after;
pageWriteLSN(xid, p, lsn);
return 0;
}
int __update_freespace_inverse(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
#ifdef REUSE_PAGES
assert(r.page == 0);
const update_tuple * t = d;
/* ("freespace %d <- %d\n", t->before, t->after);
fflush(NULL); */
* headerFreepage_ptr(p) = t->before;
freepage = t->before;
#endif
pageWriteLSN(xid, p, lsn);
return 0;
}
#ifdef REUSE_PAGES
/** @todo need to hold mutex here... */
int __update_freelist(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
assert(r.page == 0);
const update_tuple * t = d;
/* printf("freelist %d -> %d\n", t->before, t->after);
fflush(NULL); */
* headerFreepagelist_ptr(p) = t->after;
freelist = t->after;
pageWriteLSN(xid, p, lsn);
return 0;
}
int __update_freelist_inverse(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
assert(r.page == 0);
const update_tuple * t = d;
/* printf("freelist %d <- %d\n", t->before, t->after);
fflush(NULL); */
* headerFreepagelist_ptr(p) = t->before;
freelist = t->before;
pageWriteLSN(xid, p, lsn);
return 0;
}
#endif
int __free_page(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
const int * successor = d;
/*printf("Unallocing page %d\n", r.page);
fflush(NULL); */
memset(p->memAddr, 0, PAGE_SIZE);
*page_type_ptr(p) = LLADD_FREE_PAGE;
*nextfreepage_ptr(p) = *successor;
pageWriteLSN(xid, p, lsn);
return 0;
}
int __alloc_freed(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
memset(p->memAddr, 0, PAGE_SIZE);
pageWriteLSN(xid, p, lsn);
return 0;
}
compensated_function int TpageGet(int xid, int pageid, byte *memAddr) {
Page * q = 0;
try_ret(compensation_error()) {
@@ -135,35 +39,13 @@ compensated_function int TpageSet(int xid, int pageid, byte * memAddr) {
}
/** This needs to be called immediately after the storefile is opened,
since it needs to perform raw, synchronous I/O on the pagefile for
bootstrapping purposes. */
/**
This calls loadPage and releasePage directly, and bypasses the
logger.
*/
compensated_function void pageOperationsInit() {
/* Page p;
p.rwlatch = initlock();
p.loadlatch = initlock();
// assert(!posix_memalign((void **)&(p.memAddr), PAGE_SIZE, PAGE_SIZE));
p.id = 0;*/
Page * p;
try {
p = loadPage(-1, 0);
assert(!compensation_error());
} end;
/** Release lock on page zero. */
if(*page_type_ptr(p) != LLADD_HEADER_PAGE) {
/*printf("Writing new LLADD header\n"); fflush(NULL); */
headerPageInitialize(p);
} else {
/*printf("Found LLADD header.\n"); fflush(NULL);*/
}
#ifdef REUSE_PAGES
freelist = *headerFreepagelist_ptr(p);
#endif
freepage = *headerFreepage_ptr(p);
assert(freepage);
releasePage(p);
regionsInit();
pthread_mutex_init(&pageAllocMutex, NULL);
}
@@ -201,104 +83,14 @@ compensated_function void pageOperationsInit() {
*/
compensated_function int TpageDealloc(int xid, int pageid) {
#ifdef REUSE_PAGES
begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, -1) {
recordid rid;
update_tuple t;
pthread_mutex_lock(&pageAllocMutex);
rid.page = pageid;
rid.slot = 0;
rid.size = 0;
assert(freelist != pageid);
t.before = freelist;
Tupdate(xid, rid, &freelist, OPERATION_FREE_PAGE);
t.after = pageid;
freelist = pageid;
rid.page = 0;
Tupdate(xid, rid, &t, OPERATION_UPDATE_FREELIST);
pthread_mutex_unlock(&pageAllocMutex);
} end_action_ret(-1);
#endif
TregionDealloc(xid, pageid); // @todo inefficient hack!
return 0;
}
compensated_function int TpageAlloc(int xid /*, int type */) {
recordid rid;
update_tuple t;
rid.slot = 0;
rid.size = 0;
pthread_mutex_lock(&pageAllocMutex);
int newpage;
/*printf("TpageAlloc\n"); fflush(NULL); */
#ifdef REUSE_PAGES
if(freelist) {
DEBUG("Re-using old page: %d\n", freelist);
newpage = freelist;
Page * p;
begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
p = loadPage(newpage); /* Could obtain write lock here,
but this is the only function
that should ever touch pages of
type LLADD_FREE_PAGE, and we
already hold a mutex... */
} end_ret(compensation_error());
assert(*page_type_ptr(p) == LLADD_FREE_PAGE);
t.before = freelist;
freelist = *nextfreepage_ptr(p);
t.after = freelist;
assert(newpage != freelist);
releasePage(p);
begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
rid.page = newpage;
Tupdate(xid, rid, &freelist, OPERATION_ALLOC_FREED);
rid.page = 0;
Tupdate(xid, rid, &t, OPERATION_UPDATE_FREELIST);
} end_ret;
rid.page = newpage;
} else {
#endif
/*printf("Allocing new page: %d\n", freepage);
fflush(NULL); */
t.before = freepage;
newpage = freepage;
freepage++;
t.after = freepage;
/*printf("next freepage: %d\n", freepage); */
/* Don't need to touch the new page. */
rid.page = 0;
begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
Tupdate(xid, rid, &t, OPERATION_UPDATE_FREESPACE);
} end_action_ret(compensation_error());
rid.page = newpage;
#ifdef REUSE_PAGES
}
#endif
pthread_mutex_unlock(&pageAllocMutex);
/*printf("TpageAlloc alloced page %d\n", newpage); fflush(NULL); */
return newpage;
return TregionAlloc(xid, 1, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
}
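For context: after this change each TpageAlloc call grabs a fresh one-page region, so every logical page costs two physical pages, the boundary tag plus the page itself, which appears to be the inefficiency the commit message flags. TpageAllocMany amortizes the tag across the whole run:

/* Back-of-the-envelope page-file cost under the new scheme (derived
   from the boundary-tag layout in regions.c, not measured):
     n separate TpageAlloc(xid) calls -> n tags + n pages = 2n pages
     one TpageAllocMany(xid, n) call  -> 1 tag  + n pages = n + 1 pages */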
int __fixedPageAlloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
fixedPageInitialize(p, r.size, recordsPerPage(r.size));
pageWriteLSN(xid, p, lsn);
@@ -307,9 +99,6 @@ int __fixedPageAlloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
/**
@todo TfixedPageAlloc is a huge hack, and it writes an extra 4k to
the log each time it is called.
@return a recordid. The page field contains the page that was
allocated, the slot field contains the number of slots on the
page, and the size field contains the size of each slot.
@@ -318,16 +107,6 @@ recordid TfixedPageAlloc(int xid, int size) {
int page = TpageAlloc(xid);
recordid rid = {page, recordsPerPage(size), size};
Tupdate(xid, rid, 0, OPERATION_FIXED_PAGE_ALLOC);
/* Page * p = loadPage(xid, page);
fixedPageInitialize(p , size, recordsPerPage(size));
byte * tmpMemAddr = alloca(PAGE_SIZE);
memcpy(tmpMemAddr, p->memAddr, PAGE_SIZE);
TpageSet(xid, page, tmpMemAddr);
releasePage(p);
recordid rid;
rid.page = page;
rid.slot = recordsPerPage(size);
rid.size = size; */
return rid;
}
@@ -342,29 +121,8 @@ Operation getFixedPageAlloc() {
}
compensated_function int TpageAllocMany(int xid, int count /*, int type*/) {
/* int firstPage = -1;
int lastPage = -1; */
recordid rid;
rid.slot = 0;
rid.size = 0;
update_tuple t;
pthread_mutex_lock(&pageAllocMutex);
t.before = freepage;
int newpage = freepage;
freepage += count;
t.after = freepage;
/* Don't need to touch the new pages. */
rid.page = 0;
begin_action_ret(pthread_mutex_unlock, &pageAllocMutex, compensation_error()) {
Tupdate(xid, rid, &t, OPERATION_UPDATE_FREESPACE);
rid.page = newpage;
} compensate_ret(compensation_error());
return newpage;
return TregionAlloc(xid, count, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
// return 0;//newpage;
}
/** Safely allocating and freeing pages is surprisingly complex. Here is a summary of the process:
@@ -391,111 +149,19 @@ compensated_function int TpageAllocMany(int xid, int count /*, int type*/) {
*/
Operation getUpdateFreespace() {
Operation o = {
OPERATION_UPDATE_FREESPACE,
sizeof(update_tuple),
/* OPERATION_UPDATE_FREESPACE_INVERSE, */ OPERATION_NOOP,
&__update_freepage
};
return o;
}
Operation getUpdateFreespaceInverse() {
Operation o = {
OPERATION_UPDATE_FREESPACE_INVERSE,
sizeof(update_tuple),
OPERATION_UPDATE_FREESPACE,
&__update_freespace_inverse
};
return o;
}
Operation getUpdateFreelist() {
Operation o = {
OPERATION_UPDATE_FREELIST,
sizeof(update_tuple),
OPERATION_NOOP,
#ifdef REUSE_PAGES
&__update_freelist
#else
NULL
#endif
};
return o;
}
Operation getUpdateFreelistInverse() {
Operation o = {
OPERATION_UPDATE_FREELIST_INVERSE,
sizeof(update_tuple),
OPERATION_UPDATE_FREELIST,
#ifdef REUSE_PAGES
&__update_freelist_inverse
#else
NULL
#endif
};
return o;
}
/** frees a page by zeroing it, setting its type to LLADD_FREE_PAGE,
and setting the successor pointer. This operation physically logs
a whole page, which makes it expensive. Doing so is necessary in
general, but it is possible that application specific logic could
avoid the physical logging here. */
Operation getFreePageOperation() {
Operation o = {
OPERATION_FREE_PAGE,
sizeof(int),
NO_INVERSE_WHOLE_PAGE,
&__free_page
};
return o;
}
avoid the physical logging here.
/** allocs a page that was once freed by zeroing it. */
Operation getAllocFreedPage() {
Operation o = {
OPERATION_ALLOC_FREED,
sizeof(int),
OPERATION_UNALLOC_FREED,
&__alloc_freed
};
return o;
}
/** does the same thing as getFreePageOperation, but doesn't log a preimage. (Used to undo an alloc of a freed page.) */
Operation getUnallocFreedPage() {
Operation o = {
OPERATION_UNALLOC_FREED,
sizeof(int),
OPERATION_ALLOC_FREED,
&__free_page
};
return o;
}
Instead, we should just record the fact that the page was freed
somewhere. That way, we don't need to read the page in, or write
out information about it. If we lock the page against
reallocation until the current transaction commits, then we're
fine.
/*Operation getPageAlloc() {
Operation o = {
OPERATION_PAGE_ALLOC,
sizeof(int),
OPERATION_PAGE_DEALLOC,
&__pageAlloc
};
return o;
}
Operation getPageDealloc() {
Operation o = {
OPERATION_PAGE_DEALLOC,
sizeof(int),
OPERATION_PAGE_ALLOC,
&__pageDealloc
};
return o;
}*/
*/
Operation getPageSet() {
Operation o = {

View file

@@ -2,23 +2,11 @@
#include <lladd/operations.h>
#include "../page/slotted.h"
#include <assert.h>
#define REGION_BASE (123)
#define REGION_VACANT (REGION_BASE + 0)
#define REGION_ZONED (REGION_BASE + 1)
#define REGION_OCCUPIED (REGION_BASE + 2)
#define REGION_CONDEMNED (REGION_BASE + 3)
#define INVALID_XID (-1)
#define boundary_tag_ptr(p) (((byte*)end_of_usable_space_ptr((p)))-sizeof(boundary_tag_t))
typedef struct boundary_tag {
int size;
int prev_size;
int status;
int region_xid;
int allocation_manager;
} boundary_tag;
static int operate_alloc_boundary_tag(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
slottedPageInitialize(p);
@@ -29,19 +17,26 @@ static int operate_alloc_boundary_tag(int xid, Page * p, lsn_t lsn, recordid rid
}
// TODO: Implement these four functions.
static void TallocBoundaryTag(int xid, int page, boundary_tag* tag) {
static void TallocBoundaryTag(int xid, unsigned int page, boundary_tag* tag) {
// printf("Alloc boundary tag at %d\n", page);
recordid rid = {page, 0, sizeof(boundary_tag)};
Tupdate(xid, rid, tag, OPERATION_ALLOC_BOUNDARY_TAG);
}
static void TdeallocBoundaryTag(int xid, int page) {
// no-op
static void TdeallocBoundaryTag(int xid, unsigned int page) {
//no-op
}
static void TreadBoundaryTag(int xid, int page, boundary_tag* tag) {
static void TreadBoundaryTag(int xid, unsigned int page, boundary_tag* tag) {
// printf("Reading boundary tag at %d\n", page);
recordid rid = { page, 0, sizeof(boundary_tag) };
Tread(xid, rid, tag);
Page * p = loadPage(xid, page);
// printf("regions.c: %d\n", *page_type_ptr(p)); fflush(NULL);
assert(*page_type_ptr(p) == BOUNDARY_TAG_PAGE);
releasePage(p);
}
static void TsetBoundaryTag(int xid, int page, boundary_tag* tag) {
static void TsetBoundaryTag(int xid, unsigned int page, boundary_tag* tag) {
// printf("Writing boundary tag at %d\n", page);
recordid rid = { page, 0, sizeof(boundary_tag) };
Tset(xid, rid, tag);
}
@@ -52,60 +47,70 @@ void regionsInit() {
releasePage(p);
if(pageType != BOUNDARY_TAG_PAGE) {
boundary_tag t;
t.size = INT32_MAX;
t.prev_size = INT32_MAX;
t.size = UINT32_MAX;
t.prev_size = UINT32_MAX;
t.status = REGION_VACANT;
t.region_xid = INVALID_XID;
t.allocation_manager = 0;
TallocBoundaryTag(-1, 0, &t);
// This does what TallocBoundaryTag(-1, 0, &t); would do, but it
// doesn't produce a log entry. The log entry would be invalid
// since we haven't initialized everything yet. We don't need to
// flush the page, since this code is deterministic, and will be
// re-run before recovery if this update doesn't make it to disk
// after a crash.
recordid rid = {0,0,sizeof(boundary_tag)};
Page * p = loadPage (-1, 0);
operate_alloc_boundary_tag(0,p,0,rid,&t);
releasePage(p);
}
}
pthread_mutex_t region_mutex = PTHREAD_MUTEX_INITIALIZER;
int TregionAlloc(int xid, int pageCount, int allocationManager) {
unsigned int TregionAlloc(int xid, unsigned int pageCount, int allocationManager) {
// Initial implementation. Naive first fit.
pthread_mutex_lock(&region_mutex);
int pageid = 0;
unsigned int pageid = 0;
boundary_tag t;
int prev_size = INT32_MAX;
unsigned int prev_size = UINT32_MAX;
TreadBoundaryTag(xid, pageid, &t); // XXX need to check if there is a boundary tag there or not!
while(t.status != REGION_VACANT || t.size < pageCount) { // TODO: This while loop and the boundary tag manipulation below should be factored into two submodules.
// printf("t.status = %d, REGION_VACANT = %d, t.size = %d, pageCount = %d\n", t.status, REGION_VACANT, t.size, pageCount);
assert(t.prev_size == prev_size);
prev_size = t.size;
pageid += ( t.size + 1 );
TreadBoundaryTag(xid, pageid, &t);
}
// printf("page = %d, t.status = %d, REGION_VACANT = %d, t.size = %d, pageCount = %d (alloced)\n", pageid, t.status, REGION_VACANT, t.size, pageCount);
t.status = REGION_ZONED;
t.region_xid = xid;
t.allocation_manager = allocationManager;
assert(t.prev_size = prev_size);
assert(t.prev_size == prev_size);
if(t.size != pageCount) {
// need to split region
// allocate new boundary tag.
int newPageid = pageid + pageCount + 1;
unsigned int newPageid = pageid + pageCount + 1;
boundary_tag new_tag;
if(t.size != INT32_MAX) {
if(t.size != UINT32_MAX) {
new_tag.size = t.size - pageCount - 1; // pageCount must be strictly less than t->size, so this is non-negative.
boundary_tag succ_tag;
TreadBoundaryTag(xid, pageid + t.size + 1, &succ_tag);
succ_tag.prev_size = pageCount;
succ_tag.prev_size = new_tag.size;
TsetBoundaryTag(xid, pageid + t.size + 1, &succ_tag);
} else {
new_tag.size = INT32_MAX;
new_tag.size = UINT32_MAX;
}
new_tag.prev_size = pageCount;
@@ -121,14 +126,19 @@ int TregionAlloc(int xid, int pageCount, int allocationManager) {
}
t.status = REGION_ZONED;
t.region_xid = xid;
t.allocation_manager = allocationManager;
t.size = pageCount;
TsetBoundaryTag(xid, pageid, &t);
pthread_mutex_unlock(&region_mutex);
return pageid;
return pageid+1;
}
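The first-fit walk above advances by t.size + 1 pages per hop because each region occupies one boundary-tag page plus size data pages, and it returns pageid + 1 so the caller never sees the tag page. A self-contained sketch of the same traversal over an in-memory tag array (hypothetical mini_tag type; the on-disk version reads each tag with TreadBoundaryTag and also splits the chosen region):

#include <stdint.h>

typedef struct { unsigned int size; int vacant; } mini_tag;

/* Returns the first caller-visible page of the first vacant region
   with at least `want` pages, or UINT32_MAX if none fits. */
static unsigned int first_fit(const mini_tag *tags, int ntags,
                              unsigned int want) {
  unsigned int pageid = 0;
  for (int i = 0; i < ntags; i++) {
    if (tags[i].vacant && tags[i].size >= want)
      return pageid + 1;          /* skip the boundary-tag page */
    pageid += tags[i].size + 1;   /* tag page + data pages */
  }
  return UINT32_MAX;
}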
void TregionFree(int xid, int firstPage) {
void TregionDealloc(int xid, unsigned int firstPage) {
// Note that firstPage is the first *caller visible* page in the
// region. The boundary tag is stored on firstPage - 1. Also, note
@@ -141,15 +151,18 @@ void TregionFree(int xid, int firstPage) {
boundary_tag t;
TreadBoundaryTag(xid, firstPage - 1, &t);
assert(t.status != REGION_VACANT);
t.status = REGION_VACANT;
// If successor is vacant, merge.
if(t.size != INT32_MAX) { // is there a successor?
int succ_page = firstPage + t.size;
if(t.size != UINT32_MAX) { // is there a successor?
unsigned int succ_page = firstPage + t.size;
boundary_tag succ_tag;
TreadBoundaryTag(xid, succ_page, &succ_tag);
// TODO: Check page_type_ptr()...
if(succ_tag.size == INT32_MAX) {
t.size = INT32_MAX;
if(succ_tag.size == UINT32_MAX) {
t.size = UINT32_MAX;
// TODO: Truncate page file.
TdeallocBoundaryTag(xid, succ_page);
@@ -157,7 +170,7 @@ void TregionFree(int xid, int firstPage) {
} else if(succ_tag.status == REGION_VACANT) {
t.size = t.size + succ_tag.size + 1;
int succ_succ_page = succ_page + succ_tag.size + 1;
unsigned int succ_succ_page = succ_page + succ_tag.size + 1;
boundary_tag succ_succ_tag;
@@ -175,16 +188,16 @@ void TregionFree(int xid, int firstPage) {
// creates a situation where the current page is not a boundary
// tag...)
if(t.prev_size != INT32_MAX) {
if(t.prev_size != UINT32_MAX) {
int pred_page = (firstPage - 2) - t.prev_size; // If the predecessor is length zero, then its boundary tag is two pages before this region's tag.
unsigned int pred_page = (firstPage - 2) - t.prev_size; // If the predecessor is length zero, then its boundary tag is two pages before this region's tag.
boundary_tag pred_tag;
TreadBoundaryTag(xid, pred_page, &pred_tag);
if(pred_tag.status == REGION_VACANT) {
if(t.size == INT32_MAX) {
pred_tag.size = INT32_MAX;
if(t.size == UINT32_MAX) {
pred_tag.size = UINT32_MAX;
// TODO: truncate region
@@ -192,7 +205,8 @@ void TregionFree(int xid, int firstPage) {
pred_tag.size += (t.size + 1);
int succ_page = firstPage + t.size;
unsigned int succ_page = firstPage + t.size;
assert(pred_page + pred_tag.size + 1 == succ_page);
boundary_tag succ_tag;
TreadBoundaryTag(xid, succ_page, &succ_tag);
@@ -200,7 +214,7 @@ void TregionFree(int xid, int firstPage) {
TsetBoundaryTag(xid, succ_page, &succ_tag);
assert(succ_tag.status != REGION_VACANT);
assert(succ_page - pred_page == pred_tag.size);
assert(succ_page - pred_page - 1 == pred_tag.size);
}
TsetBoundaryTag(xid, pred_page, &pred_tag);
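The predecessor-merge arithmetic above is easy to get wrong (this very commit tightens one of its asserts), so here is the same bookkeeping in isolation (hypothetical in-memory mini_btag type; on disk each assignment is a TreadBoundaryTag/TsetBoundaryTag pair):

/* Layout: [tagP][P data ...][tagC][C data ...][tagS] ...
   Freed region C merges into vacant predecessor P: */
typedef struct { unsigned int size, prev_size; int status; } mini_btag;

static void merge_into_pred(mini_btag *pred, const mini_btag *cur,
                            mini_btag *succ) {
  pred->size += cur->size + 1;    /* absorb C's data pages and C's tag page */
  succ->prev_size = pred->size;   /* successor's back-pointer now skips C */
}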
@@ -219,13 +233,39 @@ void TregionFree(int xid, int firstPage) {
Operation getAllocBoundaryTag() {
Operation o = {
OPERATION_ALLOC_BOUNDARY_TAG,
sizeof(int),
sizeof(boundary_tag),
OPERATION_NOOP,
&operate_alloc_boundary_tag
};
return o;
}
void TregionFindNthActive(int xid, unsigned int regionNumber, unsigned int * firstPage, unsigned int * size) {
boundary_tag t;
recordid rid = {0, 0, sizeof(boundary_tag)};
Tread(xid, rid, &t);
unsigned int prevSize = 0;
while(t.status == REGION_VACANT) {
rid.page += (t.size + 1);
Tread(xid, rid, &t);
assert(t.size != UINT_MAX);
assert(t.prev_size != UINT_MAX);
assert(prevSize == t.prev_size || !prevSize);
prevSize = t.size;
}
for(int i = 0; i < regionNumber; i++) {
rid.page += (t.size + 1);
Tread(xid, rid, &t);
if(t.status == REGION_VACANT) { i--; }
assert(t.size != UINT_MAX);
assert(t.prev_size != UINT_MAX || i == 0);
assert(prevSize == t.prev_size || !prevSize);
prevSize = t.size;
}
*firstPage = rid.page+1;
*size = t.size;
}
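A usage sketch for TregionFindNthActive (assuming the declaration above; numActive is a hypothetical count supplied by the caller). Each call is O(n) in the number of regions, so enumerating them all this way is O(n^2), acceptable for tests and debugging but not for hot paths:

#include <stdio.h>

static void dump_regions(int xid, unsigned int numActive) {
  for (unsigned int i = 0; i < numActive; i++) {
    unsigned int firstPage, size;
    TregionFindNthActive(xid, i, &firstPage, &size);
    printf("region %u: pages %u..%u\n", i, firstPage, firstPage + size - 1);
  }
}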
/*Operation getAllocRegion() {

View file

@@ -1,12 +1,26 @@
#include "../page.h"
#include "header.h"
#include <assert.h>
int headerPageInitialize() {
Page * p;
try_ret(0) {
p = loadPage(-1, 0);
assert(!compensation_error());
} end_ret(0);
int freePage;
if(*page_type_ptr(p) != LLADD_HEADER_PAGE) {
assert(*page_type_ptr(p) == 0) ;
memset(p->memAddr, 0, PAGE_SIZE);
*page_type_ptr(p) = LLADD_HEADER_PAGE;
*headerFreepage_ptr(p) = 1;
*headerFreepagelist_ptr(p) = 0;
}
void headerPageInitialize(Page * page) {
memset(page->memAddr, 0, PAGE_SIZE);
*page_type_ptr(page) = LLADD_HEADER_PAGE;
*headerFreepage_ptr(page) = 1;
*headerFreepagelist_ptr(page) = 0;
freePage = *headerFreepage_ptr(p);
releasePage(p);
assert(freePage);
return freePage;
}
void freePage(Page * freepage, long freepage_id, Page * headerpage) {

View file

@@ -1,5 +1,6 @@
void headerPageInitialize(Page * p);
//void headerPageInitialize(Page * p);
int headerPageInitialize();
void freePageInitialize(Page * freepage, Page *headerpage);
#define headerFreepage_ptr(page) ints_from_end((page), 1)
#define headerFreepagelist_ptr(page) ints_from_end((page), 2)

View file

@@ -51,14 +51,14 @@ void setupOperationsTable() {
operationsTable[OPERATION_PAGE_DEALLOC] = getPageDealloc(); */
operationsTable[OPERATION_PAGE_SET] = getPageSet();
operationsTable[OPERATION_UPDATE_FREESPACE] = getUpdateFreespace();
/* operationsTable[OPERATION_UPDATE_FREESPACE] = getUpdateFreespace();
operationsTable[OPERATION_UPDATE_FREESPACE_INVERSE] = getUpdateFreespaceInverse();
operationsTable[OPERATION_UPDATE_FREELIST] = getUpdateFreelist();
operationsTable[OPERATION_UPDATE_FREELIST_INVERSE] = getUpdateFreelistInverse();
operationsTable[OPERATION_FREE_PAGE] = getFreePageOperation();
operationsTable[OPERATION_ALLOC_FREED] = getAllocFreedPage();
operationsTable[OPERATION_UNALLOC_FREED] = getUnallocFreedPage();
operationsTable[OPERATION_UNALLOC_FREED] = getUnallocFreedPage(); */
operationsTable[OPERATION_NOOP] = getNoop();
operationsTable[OPERATION_INSTANT_SET] = getInstantSet();
operationsTable[OPERATION_ARRAY_LIST_ALLOC] = getArrayListAlloc();
@@ -109,7 +109,6 @@ int Tinit() {
LogInit(loggerType);
try_ret(compensation_error()) {
pageOperationsInit();
} end_ret(compensation_error());
@@ -126,10 +125,10 @@ int Tinit() {
InitiateRecovery();
truncationInit();
/*truncationInit();
if(lladd_enableAutoTruncation) {
autoTruncate(); // should this be before InitiateRecovery?
}
}*/
return 0;
}

View file

@@ -2,7 +2,7 @@
if HAVE_LIBCHECK
## Had to disable check_lht because lht needs to be rewritten.
TESTS = check_lhtable check_logEntry check_logWriter check_page check_operations check_transactional2 check_recovery check_blobRecovery check_bufferManager check_indirect check_pageOperations check_linearHash check_logicalLinearHash check_header check_linkedListNTA check_linearHashNTA check_pageOrientedList check_lockManager check_compensations check_errorHandling check_ringbuffer check_iterator check_multiplexer check_bTree
TESTS = check_lhtable check_logEntry check_logWriter check_page check_operations check_transactional2 check_recovery check_blobRecovery check_bufferManager check_indirect check_pageOperations check_linearHash check_logicalLinearHash check_header check_linkedListNTA check_linearHashNTA check_pageOrientedList check_lockManager check_compensations check_errorHandling check_ringbuffer check_iterator check_multiplexer check_bTree check_regions
#check_lladdhash
else
TESTS =

View file

@@ -29,7 +29,7 @@ void initializePages() {
for(i = 0 ; i < NUM_PAGES; i++) {
Page * p;
recordid rid;
rid.page = i;
rid.page = i+1;
rid.slot = 0;
rid.size = sizeof(int);
p = loadPage(-1, rid.page);
@@ -61,7 +61,7 @@ void * workerThread(void * p) {
printf("%d", i / 50); fflush(NULL);
}
rid.page = k;
rid.page = k+1;
rid.slot = 0;
rid.size = sizeof(int);
@@ -69,7 +69,7 @@ void * workerThread(void * p) {
readRecord(1, p, rid, &j);
assert(rid.page == k);
assert(rid.page == k+1);
p->LSN = 0;
*lsn_ptr(p) = 0;

View file

@@ -110,8 +110,6 @@ long myrandom(long x) {
return (long)((r/max));
}
//#define myrandom(x)(
// (long) ( ((double)x) * ((double)random()) / ((double)RAND_MAX) ) )
#define MAXSETS 1000
#define MAXSETLEN 10000
@@ -125,10 +123,11 @@ char * itoa(int i) {
START_TEST(lhtableRandomized) {
for(int jjj = 0; jjj < NUM_ITERS; jjj++) {
time_t seed = time(0);
printf("\nSeed = %ld\n", seed);
if(jjj) {
printf("\nSeed = %ld\n", seed);
srandom(seed);
} else {
printf("\nSeed = %d\n", 1150241705);
srandom(1150241705); // This seed gets the random number generator to hit RAND_MAX, which makes a good test for myrandom()
}