stasis-bLSM/logiterators.cpp
sears 7c2397340c Fixed a bunch of iterator bugs and racy merges. Added drop_database utility.
Bugfixes:
 - Atomically deallocate regions and update the logstore object
 - Proactively invalidate iterators after each merge (before, it would simply set a not-valid bit.  This doesn't work because iterators hold page pins, which breaks force-writes)
 - Clarify semantics of opening iterators mid-stream:  All calls now return iterators that return first key >= the requested one.  revalidate() needs the key > the requested one, so it calls peek(), then (if necessary) getnext().
 - Add asserts to check that the header is latched at update, and that tuples returned by iterators are strictly monotonically increasing
 - Improve error handling in network.h. We still get (and terminate on) SIGPIPE.

Refactoring:
 - Add dispatch function to network.h.



git-svn-id: svn+ssh://svn.corp.yahoo.com/yahoo/yrl/labs/pnuts/code/logstore@620 8dad8b1f-cf64-0410-95b6-bcf113ffbcfe
2010-02-25 01:29:32 +00:00

144 lines
3.6 KiB
C++

#include "logstore.h"
#include "logiterators.h"
/////////////////////////////////////////////////////////////////////
// tree iterator implementation
/////////////////////////////////////////////////////////////////////
template <class TUPLE>
// Open the underlying B-tree iterator, either positioned at key1 (first
// key >= key1) or at the start of the tree.  Sets lsmIterator_ to NULL when
// the tree's root record is invalid, which marks this iterator as empty.
void diskTreeIterator<TUPLE>::init_iterators(TUPLE * key1, TUPLE * key2) {
  assert(!key2); // unimplemented: no end-of-range bound supported yet
  if(tree_.size == INVALID_SIZE) {
    // No valid tree to iterate over; leave the component iterator unset.
    lsmIterator_ = NULL;
    return;
  }
  // Seek to key1 when provided; otherwise start from the first entry.
  lsmIterator_ = key1
      ? diskTreeComponentIterator::openAt(-1, tree_, key1->key(), key1->keylen())
      : diskTreeComponentIterator::open(-1, tree_);
}
template <class TUPLE>
// Iterate over the whole tree rooted at record id `tree`, starting at the
// first key.
diskTreeIterator<TUPLE>::diskTreeIterator(recordid tree) :
tree_(tree)
{
init_iterators(NULL,NULL);
init_helper(NULL);
}
template <class TUPLE>
// Iterate over the tree rooted at record id `tree`, starting at the first
// key >= `key` (per the mid-stream-open semantics noted in the changelog).
diskTreeIterator<TUPLE>::diskTreeIterator(recordid tree, TUPLE& key) :
tree_(tree)
{
init_iterators(&key,NULL);
init_helper(&key);
}
template <class TUPLE>
// Iterate over a diskTreeComponent from its first key.  A NULL component
// yields an empty iterator (NULLRID makes init_iterators set lsmIterator_
// to NULL).
diskTreeIterator<TUPLE>::diskTreeIterator(diskTreeComponent *tree) :
tree_(tree ? tree->get_root_rec() : NULLRID)
{
init_iterators(NULL, NULL);
init_helper(NULL);
}
template <class TUPLE>
// Iterate over a diskTreeComponent starting at the first key >= `key`.
// A NULL component yields an empty iterator.
diskTreeIterator<TUPLE>::diskTreeIterator(diskTreeComponent *tree, TUPLE& key) :
tree_(tree ? tree->get_root_rec() : NULLRID)
{
init_iterators(&key,NULL);
init_helper(&key);
}
template <class TUPLE>
// Release all resources held by the iterator: the B-tree component iterator
// (which holds page pins), the current datapage iterator, and the current
// datapage object.
diskTreeIterator<TUPLE>::~diskTreeIterator()
{
if(lsmIterator_)
diskTreeComponentIterator::close(-1, lsmIterator_);
// Bug fix: dp_itr was never freed here, leaking the datapage iterator
// whenever this object was destroyed mid-stream.  Delete it before
// curr_page (same order getnext() uses), since it refers to curr_page.
if(dp_itr)
{
delete dp_itr;
dp_itr = 0;
}
if(curr_page!=NULL)
{
delete curr_page;
curr_page = 0;
}
}
template <class TUPLE>
// Position the datapage-level iterator (dp_itr) on the first datapage of the
// stream.  Must be called after init_iterators().  On any failure (no tree,
// or an empty tree iterator) curr_page and dp_itr are zeroed, which makes
// getnext() return NULL immediately.
void diskTreeIterator<TUPLE>::init_helper(TUPLE* key1)
{
if(!lsmIterator_)
{
DEBUG("treeIterator:\t__error__ init_helper():\tnull lsmIterator_");
curr_page = 0;
dp_itr = 0;
}
else
{
if(diskTreeComponentIterator::next(-1, lsmIterator_) == 0)
{
// Fixed error message: it previously referred to a nonexistent
// "logtreeIteratr" class.
DEBUG("diskTreeIterator:\t__error__ init_helper():\tdiskTreeComponentIterator::next returned 0." );
curr_page = 0;
dp_itr = 0;
}
else
{
// The iterator's value() takes a byte** out-parameter; the double
// pointer "hack" reinterprets it as a pageid_t* so we can read the
// datapage's page id.
pageid_t * pid_tmp;
pageid_t ** hack = &pid_tmp;
diskTreeComponentIterator::value(-1,lsmIterator_,(byte**)hack);
curr_pageid = *pid_tmp;
curr_page = new DataPage<TUPLE>(-1, curr_pageid);
DEBUG("opening datapage iterator %lld at key %s\n.", curr_pageid, key1 ? (char*)key1->key() : "NULL");
// Start the datapage iterator at key1 (or at the page start if NULL).
dp_itr = new DPITR_T(curr_page, key1);
}
}
}
template <class TUPLE>
// Return the next tuple in key order, or NULL when the stream is exhausted.
// Ownership of the returned TUPLE passes to the caller (dp_itr->getnext()
// allocates it; nothing here frees it).
TUPLE * diskTreeIterator<TUPLE>::getnext()
{
// Empty/invalid iterator (see init_iterators/init_helper): nothing to return.
if(!this->lsmIterator_) { return NULL; }
if(dp_itr == 0)
return 0;
TUPLE* readTuple = dp_itr->getnext();
if(!readTuple)
{
// Current datapage is exhausted: tear it down, then advance the tree
// iterator to the next datapage.  Delete dp_itr before curr_page since
// it refers to the page.
delete dp_itr;
dp_itr = 0;
delete curr_page;
curr_page = 0;
if(diskTreeComponentIterator::next(-1,lsmIterator_))
{
// Same byte**-as-pageid_t** out-parameter trick as init_helper: read
// the next datapage's page id from the tree iterator's value.
pageid_t *pid_tmp;
pageid_t **hack = &pid_tmp;
diskTreeComponentIterator::value(-1,lsmIterator_,(byte**)hack);
curr_pageid = *pid_tmp;
curr_page = new DataPage<TUPLE>(-1, curr_pageid);
DEBUG("opening datapage iterator %lld at beginning\n.", curr_pageid);
// Unlike init_helper, resume from the start of the new page; no key
// positioning is needed mid-stream.
dp_itr = new DPITR_T(curr_page->begin());
readTuple = dp_itr->getnext();
// A freshly opened datapage must contain at least one tuple.
assert(readTuple);
}
// else readTuple is null. We're done.
}
return readTuple;
}
// Explicit template instantiations: the template definitions live in this
// translation unit, so instantiate them here for the tuple/tree types used
// elsewhere in the system.
template class diskTreeIterator<datatuple>;
template class changingMemTreeIterator<rbtree_t, datatuple>;