DataPage::RegionAllocator -> RegionAllocator
git-svn-id: svn+ssh://svn.corp.yahoo.com/yahoo/yrl/labs/pnuts/code/logstore@682 8dad8b1f-cf64-0410-95b6-bcf113ffbcfe
This commit is contained in:
parent 54e73ab803
commit 5d533a7939
4 changed files with 123 additions and 112 deletions
datapage.h (106 lines changed)

@@ -5,7 +5,7 @@
 #include <stasis/page.h>
 #include <stasis/constants.h>

+#include "regionAllocator.h"

 //#define CHECK_FOR_SCRIBBLING
@@ -58,110 +58,6 @@ public:
 };
-
-class RegionAllocator
-{
-public:
-
-  // Open an existing region allocator.
-  RegionAllocator(int xid, recordid rid) :
-    nextPage_(INVALID_PAGE),
-    endOfRegion_(INVALID_PAGE) {
-    rid_ = rid;
-    Tread(xid, rid_, &header_);
-    regionCount_ = TarrayListLength(xid, header_.region_list);
-  }
-  // Create a new region allocator.
-  RegionAllocator(int xid, pageid_t region_length) :
-    nextPage_(0),
-    endOfRegion_(0),
-    regionCount_(0)
-  {
-    rid_ = Talloc(xid, sizeof(header_));
-    header_.region_list = TarrayListAlloc(xid, 1, 2, sizeof(pageid_t));
-    header_.region_length = region_length;
-    Tset(xid, rid_, &header_);
-  }
-  // XXX handle disk full?
-  pageid_t alloc_extent(int xid, pageid_t extent_length) {
-    assert(nextPage_ != INVALID_PAGE);
-    pageid_t ret = nextPage_;
-    nextPage_ += extent_length;
-    if(nextPage_ >= endOfRegion_) {
-      ret = TregionAlloc(xid, header_.region_length, 42); // XXX assign a region allocator id
-      TarrayListExtend(xid, header_.region_list, 1);
-      recordid rid = header_.region_list;
-      rid.slot = regionCount_;
-      Tset(xid, rid, &ret);
-      assert(extent_length <= header_.region_length); // XXX could handle this case if we wanted to. Would remove this error case, and not be hard.
-      nextPage_ = ret + extent_length;
-      endOfRegion_ = ret + header_.region_length;
-      regionCount_++;
-      assert(regionCount_ == TarrayListLength(xid, header_.region_list));
-    }
-    return ret;
-  }
-  bool grow_extent(pageid_t extension_length) {
-    assert(nextPage_ != INVALID_PAGE);
-    nextPage_ += extension_length;
-    return(nextPage_ < endOfRegion_);
-  }
-  void force_regions(int xid) {
-    assert(nextPage_ != INVALID_PAGE);
-    pageid_t regionCount = TarrayListLength(xid, header_.region_list);
-    for(recordid list_entry = header_.region_list;
-        list_entry.slot < regionCount; list_entry.slot++) {
-      pageid_t pid;
-      Tread(xid, list_entry, &pid);
-      TregionForce(xid, pid);
-    }
-  }
-  void dealloc_regions(int xid) {
-    pageid_t regionCount = TarrayListLength(xid, header_.region_list);
-
-    DEBUG("{%lld %lld %lld}\n", header_.region_list.page, (long long)header_.region_list.slot, (long long)header_.region_list.size);
-
-    for(recordid list_entry = header_.region_list;
-        list_entry.slot < regionCount; list_entry.slot++) {
-      pageid_t pid;
-      Tread(xid, list_entry, &pid);
-#ifndef CHECK_FOR_SCRIBBLING // Don't actually free the page if we'll be checking that pages are used exactly once below.
-      TregionDealloc(xid, pid);
-#endif
-    }
-    TarrayListDealloc(xid, header_.region_list);
-    Tdealloc(xid, rid_);
-  }
-  pageid_t * list_regions(int xid, pageid_t * region_length, pageid_t * region_count) {
-    *region_count = TarrayListLength(xid, header_.region_list);
-    pageid_t * ret = (pageid_t*)malloc(sizeof(pageid_t) * *region_count);
-    recordid rid = header_.region_list;
-    for(pageid_t i = 0; i < *region_count; i++) {
-      rid.slot = i;
-      Tread(xid, rid, &ret[i]);
-    }
-    *region_length = header_.region_length;
-    return ret;
-  }
-  void done() {
-    nextPage_ = INVALID_PAGE;
-    endOfRegion_ = INVALID_PAGE;
-  }
-  recordid header_rid() { return rid_; }
-private:
-  typedef struct {
-    recordid region_list;
-    pageid_t region_length;
-  } persistent_state;
-
-  recordid rid_;
-  pageid_t nextPage_;
-  pageid_t endOfRegion_;
-  pageid_t regionCount_;
-  persistent_state header_;
-public:
-  static const size_t header_size = sizeof(persistent_state);
-};
-
 public:

   //to be used when reading an existing data page from disk

@@ -17,7 +17,7 @@
 #include <stasis/page.h>
 #include <stasis/truncation.h>

+#include "regionAllocator.h"
 #include "datapage.h"
 #include "tuplemerger.h"
 #include "datatuple.h"
@@ -44,11 +44,11 @@ public:
   typedef void(*diskTreeComponent_page_deallocator_t)(int, void *);

-  internalNodes(int xid): region_alloc(new DataPage<datatuple>::RegionAllocator(xid, 10000)) {create(xid);} // XXX shouldn't hardcode region size.
+  internalNodes(int xid): region_alloc(new RegionAllocator(xid, 10000)) {create(xid);} // XXX shouldn't hardcode region size.
   internalNodes(int xid, recordid root, recordid state, recordid dp_state)
     : tree_state(state),
       root_rec(root),
-      region_alloc(new DataPage<datatuple>::RegionAllocator(xid, dp_state)) { lastLeaf = -1; }
+      region_alloc(new RegionAllocator(xid, dp_state)) { lastLeaf = -1; }
 private:
   recordid create(int xid);
 public:
@@ -107,7 +107,7 @@ public:
       diskTreeComponent_page_allocator_t allocator,
       void *allocator_state);

-  inline DataPage<datatuple>::RegionAllocator* get_alloc() { return region_alloc; }
+  inline RegionAllocator* get_alloc() { return region_alloc; }

   /**
      Initialize a page for use as an internal node of the tree.
@@ -135,7 +135,7 @@ public:
   recordid tree_state;
   recordid root_rec;

-  DataPage<datatuple>::RegionAllocator* region_alloc;
+  RegionAllocator* region_alloc;

 public:
   class iterator {

regionAllocator.h (new file, 116 lines)

@@ -0,0 +1,116 @@
+/*
+ * regionAllocator.h
+ *
+ *  Created on: Mar 9, 2010
+ *      Author: sears
+ */
+
+#ifndef REGIONALLOCATOR_H_
+#define REGIONALLOCATOR_H_
+
+class RegionAllocator
+{
+public:
+
+  // Open an existing region allocator.
+  RegionAllocator(int xid, recordid rid) :
+    nextPage_(INVALID_PAGE),
+    endOfRegion_(INVALID_PAGE) {
+    rid_ = rid;
+    Tread(xid, rid_, &header_);
+    regionCount_ = TarrayListLength(xid, header_.region_list);
+  }
+  // Create a new region allocator.
+  RegionAllocator(int xid, pageid_t region_length) :
+    nextPage_(0),
+    endOfRegion_(0),
+    regionCount_(0)
+  {
+    rid_ = Talloc(xid, sizeof(header_));
+    header_.region_list = TarrayListAlloc(xid, 1, 2, sizeof(pageid_t));
+    header_.region_length = region_length;
+    Tset(xid, rid_, &header_);
+  }
+  // XXX handle disk full?
+  pageid_t alloc_extent(int xid, pageid_t extent_length) {
+    assert(nextPage_ != INVALID_PAGE);
+    pageid_t ret = nextPage_;
+    nextPage_ += extent_length;
+    if(nextPage_ >= endOfRegion_) {
+      ret = TregionAlloc(xid, header_.region_length, 42); // XXX assign a region allocator id
+      TarrayListExtend(xid, header_.region_list, 1);
+      recordid rid = header_.region_list;
+      rid.slot = regionCount_;
+      Tset(xid, rid, &ret);
+      assert(extent_length <= header_.region_length); // XXX could handle this case if we wanted to. Would remove this error case, and not be hard.
+      nextPage_ = ret + extent_length;
+      endOfRegion_ = ret + header_.region_length;
+      regionCount_++;
+      assert(regionCount_ == TarrayListLength(xid, header_.region_list));
+    }
+    return ret;
+  }
+  bool grow_extent(pageid_t extension_length) {
+    assert(nextPage_ != INVALID_PAGE);
+    nextPage_ += extension_length;
+    return(nextPage_ < endOfRegion_);
+  }
+  void force_regions(int xid) {
+    assert(nextPage_ != INVALID_PAGE);
+    pageid_t regionCount = TarrayListLength(xid, header_.region_list);
+    for(recordid list_entry = header_.region_list;
+        list_entry.slot < regionCount; list_entry.slot++) {
+      pageid_t pid;
+      Tread(xid, list_entry, &pid);
+      TregionForce(xid, pid);
+    }
+  }
+  void dealloc_regions(int xid) {
+    pageid_t regionCount = TarrayListLength(xid, header_.region_list);
+
+    DEBUG("{%lld %lld %lld}\n", header_.region_list.page, (long long)header_.region_list.slot, (long long)header_.region_list.size);
+
+    for(recordid list_entry = header_.region_list;
+        list_entry.slot < regionCount; list_entry.slot++) {
+      pageid_t pid;
+      Tread(xid, list_entry, &pid);
+#ifndef CHECK_FOR_SCRIBBLING // Don't actually free the page if we'll be checking that pages are used exactly once below.
+      TregionDealloc(xid, pid);
+#endif
+    }
+    TarrayListDealloc(xid, header_.region_list);
+    Tdealloc(xid, rid_);
+  }
+  pageid_t * list_regions(int xid, pageid_t * region_length, pageid_t * region_count) {
+    *region_count = TarrayListLength(xid, header_.region_list);
+    pageid_t * ret = (pageid_t*)malloc(sizeof(pageid_t) * *region_count);
+    recordid rid = header_.region_list;
+    for(pageid_t i = 0; i < *region_count; i++) {
+      rid.slot = i;
+      Tread(xid, rid, &ret[i]);
+    }
+    *region_length = header_.region_length;
+    return ret;
+  }
+  void done() {
+    nextPage_ = INVALID_PAGE;
+    endOfRegion_ = INVALID_PAGE;
+  }
+  recordid header_rid() { return rid_; }
+private:
+  typedef struct {
+    recordid region_list;
+    pageid_t region_length;
+  } persistent_state;
+
+  recordid rid_;
+  pageid_t nextPage_;
+  pageid_t endOfRegion_;
+  pageid_t regionCount_;
+  persistent_state header_;
+public:
+  static const size_t header_size = sizeof(persistent_state);
+};
+
+#endif /* REGIONALLOCATOR_H_ */
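
For readers new to this class, a minimal usage sketch of the intended lifecycle, assuming the standard Stasis entry points (Tbegin/Tcommit). The 10000-page region length mirrors the call sites in this commit; the surrounding function and the extent sizes are illustrative only, not part of the commit.

    // Sketch only -- not committed code.
    #include <stasis/transactional.h>
    #include "regionAllocator.h"

    void region_allocator_example() {
      int xid = Tbegin();

      // Create a fresh allocator; each of its regions spans 10000 pages.
      RegionAllocator * alloc = new RegionAllocator(xid, 10000);
      recordid state = alloc->header_rid(); // persist this to reopen later

      // Hand out a 100-page extent, then try to extend it in place.
      pageid_t base = alloc->alloc_extent(xid, 100);
      if(!alloc->grow_extent(50)) {
        // The extension ran past the end of the current region; the next
        // alloc_extent() call grabs a fresh region transparently.
        base = alloc->alloc_extent(xid, 150);
      }
      (void)base; // callers would hand these pages to a DataPage

      alloc->force_regions(xid); // flush the backing regions to disk
      alloc->done();             // invalidate the in-memory cursor
      Tcommit(xid);
      delete alloc;

      // Later, possibly after a restart: reopen from the saved header rid.
      xid = Tbegin();
      RegionAllocator * reopened = new RegionAllocator(xid, state);
      // A reopened allocator starts with an invalid cursor, so call
      // list_regions() / dealloc_regions(), not alloc_extent().
      reopened->dealloc_regions(xid);
      Tcommit(xid);
      delete reopened;
    }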

@@ -48,8 +48,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     if(data_arr.size() > NUM_ENTRIES)
       data_arr.erase(data_arr.begin()+NUM_ENTRIES, data_arr.end());

-    DataPage<datatuple>::RegionAllocator * alloc
-      = new DataPage<datatuple>::RegionAllocator(xid, 10000); // ~ 10 datapages per region.
+    RegionAllocator * alloc = new RegionAllocator(xid, 10000); // ~ 10 datapages per region.

     recordid alloc_state = Talloc(xid,sizeof(diskTreeComponent::internalNodes::RegionAllocConf_t));
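
The "~ 10 datapages per region" comment follows from the arithmetic above: 10000-page regions divided by roughly 1000-page datapage extents. A short sketch of that spill behavior, assuming the same setup as the previous example; the 1000-page extent size and the loop bound are illustrative, not taken from the test.

    // Sketch only: watch alloc_extent() start a new region once the
    // current one cannot hold another fixed-size extent.
    int xid = Tbegin();
    RegionAllocator * alloc = new RegionAllocator(xid, 10000);
    pageid_t prev = alloc->alloc_extent(xid, 1000);
    for(int i = 1; i < 25; i++) {
      pageid_t cur = alloc->alloc_extent(xid, 1000);
      if(cur != prev + 1000) {
        // Non-contiguous jump: a fresh region was TregionAlloc()'d.
        // With these sizes, that happens after every ~9-10 extents.
        printf("new region begins at page %lld\n", (long long)cur);
      }
      prev = cur;
    }
    alloc->dealloc_regions(xid); // clean up the scratch regions
    Tcommit(xid);
    delete alloc;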