From 9b1df5b1afe8f3899a65a7faffc382fdc0cb24d4 Mon Sep 17 00:00:00 2001 From: sears Date: Thu, 23 Feb 2012 01:11:55 +0000 Subject: [PATCH] rename classes to lowerCamelCase git-svn-id: svn+ssh://svn.corp.yahoo.com/yahoo/yrl/labs/pnuts/code/logstore@3785 8dad8b1f-cf64-0410-95b6-bcf113ffbcfe --- blsm.cpp | 130 ++++++++-------- blsm.h | 94 ++++++------ datapage.cpp | 36 ++--- datapage.h | 37 +++-- datatuple.h | 28 ++-- diskTreeComponent.cpp | 60 ++++---- diskTreeComponent.h | 38 ++--- memTreeComponent.cpp | 6 +- memTreeComponent.h | 36 ++--- mergeManager.cpp | 10 +- mergeManager.h | 14 +- mergeStats.h | 4 +- merger.cpp | 52 +++---- merger.h | 8 +- regionAllocator.h | 10 +- servers/mapkeeper/blsmRequestHandler.cpp | 142 +++++++++--------- servers/mapkeeper/blsmRequestHandler.h | 14 +- .../native/benchmarks/lsm_microbenchmarks.cpp | 102 ++++++------- servers/native/benchmarks/tcpclient_noop.cpp | 4 +- servers/native/logserver.cpp | 8 +- servers/native/logserver.h | 6 +- servers/native/network.h | 20 +-- servers/native/newserver.cpp | 8 +- servers/native/requestDispatch.cpp | 88 +++++------ servers/native/requestDispatch.h | 32 ++-- servers/native/server.cpp | 10 +- servers/native/simpleServer.cpp | 2 +- servers/native/simpleServer.h | 4 +- servers/native/tcpclient.cpp | 24 +-- servers/native/tcpclient.h | 10 +- servers/native/util/change_log_mode.cpp | 6 +- servers/native/util/copy_database.cpp | 4 +- servers/native/util/drop_database.cpp | 4 +- servers/native/util/dump_blockmap.cpp | 4 +- servers/native/util/histogram.cpp | 4 +- servers/native/util/shutdown.cpp | 4 +- servers/native/util/space_usage.cpp | 4 +- test/CMakeLists.txt | 4 +- test/check_datapage.cpp | 40 ++--- test/check_gen.cpp | 8 +- test/check_logtable.cpp | 16 +- test/check_logtree.cpp | 6 +- test/check_merge.cpp | 18 +-- test/check_mergelarge.cpp | 12 +- test/check_mergetuple.cpp | 28 ++-- test/check_rbtree.cpp | 8 +- test/check_tcpbulkinsert.cpp | 16 +- test/check_tcpclient.cpp | 18 +-- test/check_testAndSet.cpp | 18 +-- tuplemerger.cpp | 8 +- tuplemerger.h | 14 +- 51 files changed, 638 insertions(+), 643 deletions(-) diff --git a/blsm.cpp b/blsm.cpp index f19aa8c..17d4e87 100644 --- a/blsm.cpp +++ b/blsm.cpp @@ -40,7 +40,7 @@ static inline double tv_to_double(struct timeval tv) // LOG TABLE IMPLEMENTATION ///////////////////////////////////////////////////////////////// -blsm::blsm(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pageid_t datapage_region_size, pageid_t datapage_size) +bLSM::bLSM(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pageid_t datapage_region_size, pageid_t datapage_size) { recovering = true; this->max_c0_size = max_c0_size; @@ -63,7 +63,7 @@ blsm::blsm(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pa current_timestamp = 0; expiry = 0; this->merge_mgr = 0; - tmerger = new tuplemerger(&replace_merger); + tmerger = new tupleMerger(&replace_merger); header_mut = rwlc_initlock(); pthread_mutex_init(&rb_mut, 0); @@ -85,7 +85,7 @@ blsm::blsm(int log_mode, pageid_t max_c0_size, pageid_t internal_region_size, pa stasis_log_file_permissions); } -blsm::~blsm() +bLSM::~bLSM() { delete merge_mgr; // shuts down pretty print thread. 
@@ -110,17 +110,17 @@ blsm::~blsm() delete tmerger; } -void blsm::init_stasis() { +void bLSM::init_stasis() { - DataPage::register_stasis_page_impl(); + dataPage::register_stasis_page_impl(); // stasis_buffer_manager_hint_writes_are_sequential = 1; Tinit(); } -void blsm::deinit_stasis() { Tdeinit(); } +void bLSM::deinit_stasis() { Tdeinit(); } -recordid blsm::allocTable(int xid) +recordid bLSM::allocTable(int xid) { table_rec = Talloc(xid, sizeof(tbl_header)); mergeStats * stats = 0; @@ -142,7 +142,7 @@ recordid blsm::allocTable(int xid) return table_rec; } -void blsm::openTable(int xid, recordid rid) { +void bLSM::openTable(int xid, recordid rid) { table_rec = rid; Tread(xid, table_rec, &tbl_header); tree_c2 = new diskTreeComponent(xid, tbl_header.c2_root, tbl_header.c2_state, tbl_header.c2_dp_state, 0); @@ -156,23 +156,23 @@ void blsm::openTable(int xid, recordid rid) { } -void blsm::logUpdate(datatuple * tup) { +void bLSM::logUpdate(dataTuple * tup) { byte * buf = tup->to_bytes(); LogEntry * e = stasis_log_write_update(log_file, 0, INVALID_PAGE, 0/*Page**/, 0/*op*/, buf, tup->byte_length()); log_file->write_entry_done(log_file,e); free(buf); } -void blsm::replayLog() { +void bLSM::replayLog() { lsn_t start = tbl_header.log_trunc; LogHandle * lh = start ? getLSNHandle(log_file, start) : getLogHandle(log_file); const LogEntry * e; while((e = nextInLog(lh))) { switch(e->type) { case UPDATELOG: { - datatuple * tup = datatuple::from_bytes((byte*)stasis_log_entry_update_args_cptr(e)); + dataTuple * tup = dataTuple::from_bytes((byte*)stasis_log_entry_update_args_cptr(e)); insertTuple(tup); - datatuple::freetuple(tup); + dataTuple::freetuple(tup); } break; case INTERNALLOG: { } break; default: assert(e->type == UPDATELOG); abort(); @@ -184,12 +184,12 @@ void blsm::replayLog() { } -lsn_t blsm::get_log_offset() { +lsn_t bLSM::get_log_offset() { if(recovering || !log_mode) { return INVALID_LSN; } return log_file->next_available_lsn(log_file); } -void blsm::truncate_log() { +void bLSM::truncate_log() { if(recovering) { printf("Not truncating log until recovery is complete.\n"); } else { @@ -200,7 +200,7 @@ void blsm::truncate_log() { } } -void blsm::update_persistent_header(int xid, lsn_t trunc_lsn) { +void bLSM::update_persistent_header(int xid, lsn_t trunc_lsn) { tbl_header.c2_root = tree_c2->get_root_rid(); tbl_header.c2_dp_state = tree_c2->get_datapage_allocator_rid(); @@ -219,7 +219,7 @@ void blsm::update_persistent_header(int xid, lsn_t trunc_lsn) { Tset(xid, table_rec, &tbl_header); } -void blsm::flushTable() +void bLSM::flushTable() { struct timeval start_tv, stop_tv; double start, stop; @@ -277,7 +277,7 @@ void blsm::flushTable() c0_flushing = false; } -datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) +dataTuple * bLSM::findTuple(int xid, const dataTuple::key_t key, size_t keySize) { // Apply proportional backpressure to reads as well as writes. This prevents // starvation of the merge threads on fast boxes. 
@@ -286,12 +286,12 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) #endif //prepare a search tuple - datatuple *search_tuple = datatuple::create(key, keySize); + dataTuple *search_tuple = dataTuple::create(key, keySize); pthread_mutex_lock(&rb_mut); - datatuple *ret_tuple=0; + dataTuple *ret_tuple=0; //step 1: look in tree_c0 memTreeComponent::rbtree_t::iterator rbitr = get_tree_c0()->find(search_tuple); @@ -312,14 +312,14 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) rbitr = get_tree_c0_mergeable()->find(search_tuple); if(rbitr != get_tree_c0_mergeable()->end()) { - datatuple *tuple = *rbitr; + dataTuple *tuple = *rbitr; if(tuple->isDelete()) //tuple deleted done = true; //return ret_tuple else if(ret_tuple != 0) //merge the two { - datatuple *mtuple = tmerger->merge(tuple, ret_tuple); //merge the two - datatuple::freetuple(ret_tuple); //free tuple from current tree + dataTuple *mtuple = tmerger->merge(tuple, ret_tuple); //merge the two + dataTuple::freetuple(ret_tuple); //free tuple from current tree ret_tuple = mtuple; //set return tuple to merge result } else //key first found in old mem tree @@ -334,7 +334,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!done && get_tree_c1_prime() != 0) { DEBUG("old c1 tree not null\n"); - datatuple *tuple_oc1 = get_tree_c1_prime()->findTuple(xid, key, keySize); + dataTuple *tuple_oc1 = get_tree_c1_prime()->findTuple(xid, key, keySize); if(tuple_oc1 != NULL) { @@ -343,8 +343,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) done = true; else if(ret_tuple != 0) //merge the two { - datatuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two - datatuple::freetuple(ret_tuple); //free tuple from before + dataTuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two + dataTuple::freetuple(ret_tuple); //free tuple from before ret_tuple = mtuple; //set return tuple to merge result } else //found for the first time @@ -355,7 +355,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!use_copy) { - datatuple::freetuple(tuple_oc1); //free tuple from tree old c1 + dataTuple::freetuple(tuple_oc1); //free tuple from tree old c1 } } } @@ -363,7 +363,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) //step 3: check c1 if(!done) { - datatuple *tuple_c1 = get_tree_c1()->findTuple(xid, key, keySize); + dataTuple *tuple_c1 = get_tree_c1()->findTuple(xid, key, keySize); if(tuple_c1 != NULL) { bool use_copy = false; @@ -371,8 +371,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) done = true; else if(ret_tuple != 0) //merge the two { - datatuple *mtuple = tmerger->merge(tuple_c1, ret_tuple); //merge the two - datatuple::freetuple(ret_tuple); //free tuple from before + dataTuple *mtuple = tmerger->merge(tuple_c1, ret_tuple); //merge the two + dataTuple::freetuple(ret_tuple); //free tuple from before ret_tuple = mtuple; //set return tuple to merge result } else //found for the first time @@ -383,7 +383,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!use_copy) { - datatuple::freetuple(tuple_c1); //free tuple from tree c1 + dataTuple::freetuple(tuple_c1); //free tuple from tree c1 } } } @@ -392,7 +392,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!done && get_tree_c1_mergeable() != 0) { DEBUG("old c1 tree not 
null\n"); - datatuple *tuple_oc1 = get_tree_c1_mergeable()->findTuple(xid, key, keySize); + dataTuple *tuple_oc1 = get_tree_c1_mergeable()->findTuple(xid, key, keySize); if(tuple_oc1 != NULL) { @@ -401,8 +401,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) done = true; else if(ret_tuple != 0) //merge the two { - datatuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two - datatuple::freetuple(ret_tuple); //free tuple from before + dataTuple *mtuple = tmerger->merge(tuple_oc1, ret_tuple); //merge the two + dataTuple::freetuple(ret_tuple); //free tuple from before ret_tuple = mtuple; //set return tuple to merge result } else //found for the first time @@ -413,7 +413,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!use_copy) { - datatuple::freetuple(tuple_oc1); //free tuple from tree old c1 + dataTuple::freetuple(tuple_oc1); //free tuple from tree old c1 } } } @@ -422,7 +422,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!done) { DEBUG("Not in old first disk tree\n"); - datatuple *tuple_c2 = get_tree_c2()->findTuple(xid, key, keySize); + dataTuple *tuple_c2 = get_tree_c2()->findTuple(xid, key, keySize); if(tuple_c2 != NULL) { @@ -431,8 +431,8 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) done = true; else if(ret_tuple != 0) { - datatuple *mtuple = tmerger->merge(tuple_c2, ret_tuple); //merge the two - datatuple::freetuple(ret_tuple); //free tuple from before + dataTuple *mtuple = tmerger->merge(tuple_c2, ret_tuple); //merge the two + dataTuple::freetuple(ret_tuple); //free tuple from before ret_tuple = mtuple; //set return tuple to merge result } else //found for the first time @@ -443,16 +443,16 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) if(!use_copy) { - datatuple::freetuple(tuple_c2); //free tuple from tree c2 + dataTuple::freetuple(tuple_c2); //free tuple from tree c2 } } } rwlc_unlock(header_mut); - datatuple::freetuple(search_tuple); + dataTuple::freetuple(search_tuple); if (ret_tuple != NULL && ret_tuple->isDelete()) { // this is a tombstone. don't return it - datatuple::freetuple(ret_tuple); + dataTuple::freetuple(ret_tuple); return NULL; } return ret_tuple; @@ -463,7 +463,7 @@ datatuple * blsm::findTuple(int xid, const datatuple::key_t key, size_t keySize) * returns the first record found with the matching key * (not to be used together with diffs) **/ -datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize) +dataTuple * bLSM::findTuple_first(int xid, dataTuple::key_t key, size_t keySize) { // Apply proportional backpressure to reads as well as writes. This prevents // starvation of the merge threads on fast boxes. @@ -472,9 +472,9 @@ datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize) #endif //prepare a search tuple - datatuple * search_tuple = datatuple::create(key, keySize); + dataTuple * search_tuple = dataTuple::create(key, keySize); - datatuple *ret_tuple=0; + dataTuple *ret_tuple=0; //step 1: look in tree_c0 pthread_mutex_lock(&rb_mut); @@ -551,11 +551,11 @@ datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize) rwlc_unlock(header_mut); } - datatuple::freetuple(search_tuple); + dataTuple::freetuple(search_tuple); if (ret_tuple != NULL && ret_tuple->isDelete()) { // this is a tombstone. 
don't return it - datatuple::freetuple(ret_tuple); + dataTuple::freetuple(ret_tuple); return NULL; } @@ -563,7 +563,7 @@ datatuple * blsm::findTuple_first(int xid, datatuple::key_t key, size_t keySize) } -datatuple * blsm::insertTupleHelper(datatuple *tuple) +dataTuple * bLSM::insertTupleHelper(dataTuple *tuple) { bool need_free = false; if(!tuple->isDelete() && expiry != 0) { @@ -576,22 +576,22 @@ datatuple * blsm::insertTupleHelper(datatuple *tuple) memcpy(newkey, tuple->strippedkey(), kl); newkey[kl] = 0; memcpy(newkey+kl+1, &ts, ts_sz); - datatuple * old = tuple; - tuple = datatuple::create(newkey, kl+ 1+ ts_sz, tuple->data(), tuple->datalen()); + dataTuple * old = tuple; + tuple = dataTuple::create(newkey, kl+ 1+ ts_sz, tuple->data(), tuple->datalen()); assert(tuple->strippedkeylen() == old->strippedkeylen()); - assert(!datatuple::compare_obj(tuple, old)); + assert(!dataTuple::compare_obj(tuple, old)); free(newkey); need_free = true; } //find the previous tuple with same key in the memtree if exists pthread_mutex_lock(&rb_mut); memTreeComponent::rbtree_t::iterator rbitr = tree_c0->find(tuple); - datatuple * t = 0; - datatuple * pre_t = 0; + dataTuple * t = 0; + dataTuple * pre_t = 0; if(rbitr != tree_c0->end()) { pre_t = *rbitr; //do the merging - datatuple *new_t = tmerger->merge(pre_t, tuple); + dataTuple *new_t = tmerger->merge(pre_t, tuple); merge_mgr->get_merge_stats(0)->merged_tuples(new_t, tuple, pre_t); t = new_t; @@ -608,12 +608,12 @@ datatuple * blsm::insertTupleHelper(datatuple *tuple) } pthread_mutex_unlock(&rb_mut); - if(need_free) { datatuple::freetuple(tuple); } + if(need_free) { dataTuple::freetuple(tuple); } return pre_t; } -void blsm::insertManyTuples(datatuple ** tuples, int tuple_count) { +void bLSM::insertManyTuples(dataTuple ** tuples, int tuple_count) { for(int i = 0; i < tuple_count; i++) { merge_mgr->read_tuple_from_small_component(0, tuples[i]); } @@ -631,18 +631,18 @@ void blsm::insertManyTuples(datatuple ** tuples, int tuple_count) { int num_old_tups = 0; pageid_t sum_old_tup_lens = 0; for(int i = 0; i < tuple_count; i++) { - datatuple * old_tup = insertTupleHelper(tuples[i]); + dataTuple * old_tup = insertTupleHelper(tuples[i]); if(old_tup) { num_old_tups++; sum_old_tup_lens += old_tup->byte_length(); - datatuple::freetuple(old_tup); + dataTuple::freetuple(old_tup); } } merge_mgr->read_tuple_from_large_component(0, num_old_tups, sum_old_tup_lens); } -void blsm::insertTuple(datatuple *tuple) +void bLSM::insertTuple(dataTuple *tuple) { if(log_mode && !recovering) { logUpdate(tuple); @@ -656,26 +656,26 @@ void blsm::insertTuple(datatuple *tuple) // any locks! merge_mgr->read_tuple_from_small_component(0, tuple); - datatuple * pre_t = 0; // this is a pointer to any data tuples that we'll be deleting below. We need to update the merge_mgr statistics with it, but have to do so outside of the rb_mut region. + dataTuple * pre_t = 0; // this is a pointer to any data tuples that we'll be deleting below. We need to update the merge_mgr statistics with it, but have to do so outside of the rb_mut region. pre_t = insertTupleHelper(tuple); if(pre_t) { // needs to be here; calls update_progress, which sometimes grabs mutexes.. merge_mgr->read_tuple_from_large_component(0, pre_t); // was interspersed with the erase, insert above... 
- datatuple::freetuple(pre_t); //free the previous tuple + dataTuple::freetuple(pre_t); //free the previous tuple } DEBUG("tree size %d tuples %lld bytes.\n", tsize, tree_bytes); } -bool blsm::testAndSetTuple(datatuple *tuple, datatuple *tuple2) +bool bLSM::testAndSetTuple(dataTuple *tuple, dataTuple *tuple2) { bool succ = false; static pthread_mutex_t test_and_set_mut = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_lock(&test_and_set_mut); - datatuple * exists = findTuple_first(-1, tuple2 ? tuple2->strippedkey() : tuple->strippedkey(), tuple2 ? tuple2->strippedkeylen() : tuple->strippedkeylen()); + dataTuple * exists = findTuple_first(-1, tuple2 ? tuple2->strippedkey() : tuple->strippedkey(), tuple2 ? tuple2->strippedkeylen() : tuple->strippedkeylen()); if(!tuple2 || tuple2->isDelete()) { if(!exists || exists->isDelete()) { @@ -690,18 +690,18 @@ bool blsm::testAndSetTuple(datatuple *tuple, datatuple *tuple2) succ = false; } } - if(exists) datatuple::freetuple(exists); + if(exists) dataTuple::freetuple(exists); if(succ) insertTuple(tuple); pthread_mutex_unlock(&test_and_set_mut); return succ; } -void blsm::registerIterator(iterator * it) { +void bLSM::registerIterator(iterator * it) { its.push_back(it); } -void blsm::forgetIterator(iterator * it) { +void bLSM::forgetIterator(iterator * it) { for(unsigned int i = 0; i < its.size(); i++) { if(its[i] == it) { its.erase(its.begin()+i); @@ -710,7 +710,7 @@ void blsm::forgetIterator(iterator * it) { } } -void blsm::bump_epoch() { +void bLSM::bump_epoch() { epoch++; for(unsigned int i = 0; i < its.size(); i++) { its[i]->invalidate(); diff --git a/blsm.h b/blsm.h index a5b6413..c92896f 100644 --- a/blsm.h +++ b/blsm.h @@ -25,15 +25,11 @@ #include "diskTreeComponent.h" #include "memTreeComponent.h" - #include "tuplemerger.h" - #include "mergeManager.h" #include "mergeStats.h" -class logtable_mergedata; - -class blsm { +class bLSM { public: class iterator; @@ -49,28 +45,28 @@ public: // 6GB ~= 100B * 500 GB / (datapage_size * 4KB) // (100B * 500GB) / (6GB * 4KB) = 2.035 // RCS: Set this to 1 so that we do (on average) one seek per b-tree read. - blsm(int log_mode = 0, pageid_t max_c0_size = 100 * 1024 * 1024, pageid_t internal_region_size = 1000, pageid_t datapage_region_size = 10000, pageid_t datapage_size = 1); + bLSM(int log_mode = 0, pageid_t max_c0_size = 100 * 1024 * 1024, pageid_t internal_region_size = 1000, pageid_t datapage_region_size = 10000, pageid_t datapage_size = 1); - ~blsm(); + ~bLSM(); double * R() { return &r_val; } //user access functions - datatuple * findTuple(int xid, const datatuple::key_t key, size_t keySize); + dataTuple * findTuple(int xid, const dataTuple::key_t key, size_t keySize); - datatuple * findTuple_first(int xid, datatuple::key_t key, size_t keySize); + dataTuple * findTuple_first(int xid, dataTuple::key_t key, size_t keySize); private: - datatuple * insertTupleHelper(datatuple *tuple); + dataTuple * insertTupleHelper(dataTuple *tuple); public: - void insertManyTuples(struct datatuple **tuples, int tuple_count); - void insertTuple(struct datatuple *tuple); + void insertManyTuples(struct dataTuple **tuples, int tuple_count); + void insertTuple(struct dataTuple *tuple); /** This test and set has strange semantics on two fronts: * * 1) It is not atomic with respect to non-testAndSet operations (which is fine in theory, since they have no barrier semantics, and we don't have a use case to support the extra overhead) * 2) If tuple2 is not null, it looks at tuple2's key instead of tuple's key. 
This means you can atomically set the value of one key based on the value of another (if you want to...) */ - bool testAndSetTuple(struct datatuple *tuple, struct datatuple *tuple2); + bool testAndSetTuple(struct dataTuple *tuple, struct dataTuple *tuple2); //other class functions recordid allocTable(int xid); @@ -78,7 +74,7 @@ public: void flushTable(); void replayLog(); - void logUpdate(datatuple * tup); + void logUpdate(dataTuple * tup); static void init_stasis(); static void deinit_stasis(); @@ -115,7 +111,7 @@ public: void update_persistent_header(int xid, lsn_t log_trunc = INVALID_LSN); - inline tuplemerger * gettuplemerger(){return tmerger;} + inline tupleMerger * gettuplemerger(){return tmerger;} public: @@ -183,14 +179,14 @@ public: pageid_t datapage_region_size; // " pageid_t datapage_size; // " private: - tuplemerger *tmerger; + tupleMerger *tmerger; std::vector its; public: bool shutting_down_; - bool mightBeOnDisk(datatuple * t) { + bool mightBeOnDisk(dataTuple * t) { if(tree_c1) { if(!tree_c1->bloom_filter) { DEBUG("no c1 bloom filter\n"); return true; } if(bloom_filter_lookup(tree_c1->bloom_filter, (const char*)t->strippedkey(), t->strippedkeylen())) { DEBUG("in c1\n"); return true; } @@ -202,7 +198,7 @@ public: return mightBeAfterMemMerge(t); } - bool mightBeAfterMemMerge(datatuple * t) { + bool mightBeAfterMemMerge(dataTuple * t) { if(tree_c1_mergeable) { if(!tree_c1_mergeable->bloom_filter) { DEBUG("no c1m bloom filter\n"); return true; } @@ -220,11 +216,11 @@ public: template class mergeManyIterator { public: - explicit mergeManyIterator(ITRA* a, ITRN** iters, int num_iters, datatuple*(*merge)(const datatuple*,const datatuple*), int (*cmp)(const datatuple*,const datatuple*)) : + explicit mergeManyIterator(ITRA* a, ITRN** iters, int num_iters, dataTuple*(*merge)(const dataTuple*,const dataTuple*), int (*cmp)(const dataTuple*,const dataTuple*)) : num_iters_(num_iters+1), first_iter_(a), iters_((ITRN**)malloc(sizeof(*iters_) * num_iters)), // exactly the number passed in - current_((datatuple**)malloc(sizeof(*current_) * (num_iters_))), // one more than was passed in + current_((dataTuple**)malloc(sizeof(*current_) * (num_iters_))), // one more than was passed in last_iter_(-1), cmp_(cmp), merge_(merge), @@ -240,7 +236,7 @@ public: delete(first_iter_); for(int i = 0; i < num_iters_; i++) { if(i != last_iter_) { - if(current_[i]) datatuple::freetuple(current_[i]); + if(current_[i]) dataTuple::freetuple(current_[i]); } } for(int i = 1; i < num_iters_; i++) { @@ -250,12 +246,12 @@ public: free(iters_); free(dups); } - datatuple * peek() { - datatuple * ret = next_callerFrees(); + dataTuple * peek() { + dataTuple * ret = next_callerFrees(); last_iter_ = -1; // don't advance iterator on next peek() or getnext() call. return ret; } - datatuple * next_callerFrees() { + dataTuple * next_callerFrees() { int num_dups = 0; if(last_iter_ != -1) { // get the value after the one we just returned to the user @@ -287,7 +283,7 @@ public: } } } - datatuple * ret; + dataTuple * ret; if(!merge_) { ret = current_[min]; } else { @@ -296,7 +292,7 @@ public: } // advance the iterators that match the tuple we're returning. for(int i = 0; i < num_dups; i++) { - datatuple::freetuple(current_[dups[i]]); // should never be null + dataTuple::freetuple(current_[dups[i]]); // should never be null current_[dups[i]] = iters_[dups[i]-1]->next_callerFrees(); } last_iter_ = min; // mark the min iter to be advance at the next invocation of next(). This saves us a copy in the non-merging case. 
@@ -307,12 +303,12 @@ public: int num_iters_; ITRA * first_iter_; ITRN ** iters_; - datatuple ** current_; + dataTuple ** current_; int last_iter_; - int (*cmp_)(const datatuple*,const datatuple*); - datatuple*(*merge_)(const datatuple*,const datatuple*); + int (*cmp_)(const dataTuple*,const dataTuple*); + dataTuple*(*merge_)(const dataTuple*,const dataTuple*); // temporary variables initiaized once for effiency int * dups; @@ -322,7 +318,7 @@ public: class iterator { public: - explicit iterator(blsm* ltable) + explicit iterator(bLSM* ltable) : ltable(ltable), epoch(ltable->get_epoch()), merge_it_(NULL), @@ -338,7 +334,7 @@ public: // rwlc_unlock(ltable->header_mut); } - explicit iterator(blsm* ltable,datatuple *key) + explicit iterator(bLSM* ltable,dataTuple *key) : ltable(ltable), epoch(ltable->get_epoch()), merge_it_(NULL), @@ -361,16 +357,16 @@ public: ltable->forgetIterator(this); invalidate(); pthread_mutex_unlock(<able->rb_mut); - if(last_returned) datatuple::freetuple(last_returned); + if(last_returned) dataTuple::freetuple(last_returned); rwlc_unlock(ltable->header_mut); } private: - datatuple * getnextHelper() { + dataTuple * getnextHelper() { // rwlc_readlock(ltable->header_mut); revalidate(); - datatuple * tmp = merge_it_->next_callerFrees(); + dataTuple * tmp = merge_it_->next_callerFrees(); if(last_returned && tmp) { - int res = datatuple::compare(last_returned->strippedkey(), last_returned->strippedkeylen(), tmp->strippedkey(), tmp->strippedkeylen()); + int res = dataTuple::compare(last_returned->strippedkey(), last_returned->strippedkeylen(), tmp->strippedkey(), tmp->strippedkeylen()); if(res >= 0) { int al = last_returned->strippedkeylen(); char * a =(char*)malloc(al + 1); @@ -387,21 +383,21 @@ public: } if(last_returned) { - datatuple::freetuple(last_returned); + dataTuple::freetuple(last_returned); } last_returned = tmp; // rwlc_unlock(ltable->header_mut); return last_returned; } public: - datatuple * getnextIncludingTombstones() { - datatuple * ret = getnextHelper(); + dataTuple * getnextIncludingTombstones() { + dataTuple * ret = getnextHelper(); ret = ret ? ret->create_copy() : NULL; return ret; } - datatuple * getnext() { - datatuple * ret; + dataTuple * getnext() { + dataTuple * ret; while((ret = getnextHelper()) && ret->isDelete()) { } // getNextHelper handles its own memory. ret = ret ? ret->create_copy() : NULL; // XXX hate making copy! Caller should not manage our memory. 
return ret; @@ -427,7 +423,7 @@ public: static const int C1 = 0; static const int C1_MERGEABLE = 1; static const int C2 = 2; - blsm * ltable; + bLSM * ltable; uint64_t epoch; typedef mergeManyIterator< memTreeComponent::batchedRevalidatingIterator, @@ -438,8 +434,8 @@ public: merge_it_t* merge_it_; - datatuple * last_returned; - datatuple * key; + dataTuple * last_returned; + dataTuple * key; bool valid; int reval_count; static const int reval_period = 100; @@ -465,7 +461,7 @@ public: diskTreeComponent::iterator * disk_it[4]; epoch = ltable->get_epoch(); - datatuple *t; + dataTuple *t; if(last_returned) { t = last_returned; } else if(key) { @@ -490,13 +486,13 @@ public: disk_it[3] = ltable->get_tree_c2()->open_iterator(t); inner_merge_it_t * inner_merge_it = - new inner_merge_it_t(c0_it, c0_mergeable_it, 1, NULL, datatuple::compare_obj); - merge_it_ = new merge_it_t(inner_merge_it, disk_it, 4, NULL, datatuple::compare_obj); // XXX Hardcodes comparator, and does not handle merges + new inner_merge_it_t(c0_it, c0_mergeable_it, 1, NULL, dataTuple::compare_obj); + merge_it_ = new merge_it_t(inner_merge_it, disk_it, 4, NULL, dataTuple::compare_obj); // XXX Hardcodes comparator, and does not handle merges if(last_returned) { - datatuple * junk = merge_it_->peek(); - if(junk && !datatuple::compare(junk->strippedkey(), junk->strippedkeylen(), last_returned->strippedkey(), last_returned->strippedkeylen())) { + dataTuple * junk = merge_it_->peek(); + if(junk && !dataTuple::compare(junk->strippedkey(), junk->strippedkeylen(), last_returned->strippedkey(), last_returned->strippedkeylen())) { // we already returned junk - datatuple::freetuple(merge_it_->next_callerFrees()); + dataTuple::freetuple(merge_it_->next_callerFrees()); } } valid = true; diff --git a/datapage.cpp b/datapage.cpp index b583228..e5b4a65 100644 --- a/datapage.cpp +++ b/datapage.cpp @@ -42,7 +42,7 @@ static int notSupported(int xid, Page * p) { return 0; } END_C_DECLS -void DataPage::register_stasis_page_impl() { +void dataPage::register_stasis_page_impl() { static page_impl pi = { DATA_PAGE, 1, @@ -76,7 +76,7 @@ void DataPage::register_stasis_page_impl() { } -DataPage::DataPage(int xid, RegionAllocator * alloc, pageid_t pid): // XXX Hack!! The read-only constructor signature is too close to the other's +dataPage::dataPage(int xid, regionAllocator * alloc, pageid_t pid): // XXX Hack!! The read-only constructor signature is too close to the other's xid_(xid), page_count_(1), // will be opportunistically incremented as we scan the datapage. initial_page_count_(-1), // used by append. 
@@ -94,7 +94,7 @@ DataPage::DataPage(int xid, RegionAllocator * alloc, pageid_t pid): // XXX Hack releasePage(p); } -DataPage::DataPage(int xid, pageid_t page_count, RegionAllocator *alloc) : +dataPage::dataPage(int xid, pageid_t page_count, regionAllocator *alloc) : xid_(xid), page_count_(1), initial_page_count_(page_count), @@ -107,11 +107,11 @@ DataPage::DataPage(int xid, pageid_t page_count, RegionAllocator *alloc) : initialize(); } -void DataPage::initialize() { +void dataPage::initialize() { initialize_page(first_page_); } -void DataPage::initialize_page(pageid_t pageid) { +void dataPage::initialize_page(pageid_t pageid) { //load the first page Page *p; #ifdef CHECK_FOR_SCRIBBLING @@ -144,7 +144,7 @@ void DataPage::initialize_page(pageid_t pageid) { releasePage(p); } -size_t DataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_p) { +size_t dataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_p) { if(latch_p) { *latch_p = NULL; } recordid chunk = calc_chunk_from_offset(write_offset_); if(chunk.size > remaining) { @@ -167,7 +167,7 @@ size_t DataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_ } return chunk.size; } -size_t DataPage::read_bytes(byte * buf, off_t offset, ssize_t remaining) { +size_t dataPage::read_bytes(byte * buf, off_t offset, ssize_t remaining) { recordid chunk = calc_chunk_from_offset(offset); if(chunk.size > remaining) { chunk.size = remaining; @@ -190,7 +190,7 @@ size_t DataPage::read_bytes(byte * buf, off_t offset, ssize_t remaining) { return chunk.size; } -bool DataPage::initialize_next_page() { +bool dataPage::initialize_next_page() { recordid rid = calc_chunk_from_offset(write_offset_); assert(rid.slot == 0); DEBUG("\t\t%lld\n", (long long)rid.page); @@ -215,7 +215,7 @@ bool DataPage::initialize_next_page() { return true; } -Page * DataPage::write_data_and_latch(const byte * buf, size_t len, bool init_next, bool latch) { +Page * dataPage::write_data_and_latch(const byte * buf, size_t len, bool init_next, bool latch) { bool first = true; Page * p = 0; while(1) { @@ -255,11 +255,11 @@ Page * DataPage::write_data_and_latch(const byte * buf, size_t len, bool init_ne } } -bool DataPage::write_data(const byte * buf, size_t len, bool init_next) { +bool dataPage::write_data(const byte * buf, size_t len, bool init_next) { return 0 != write_data_and_latch(buf, len, init_next, false); } -bool DataPage::read_data(byte * buf, off_t offset, size_t len) { +bool dataPage::read_data(byte * buf, off_t offset, size_t len) { while(1) { assert(len > 0); size_t read_count = read_bytes(buf, offset, len); @@ -275,7 +275,7 @@ bool DataPage::read_data(byte * buf, off_t offset, size_t len) { } } -bool DataPage::append(datatuple const * dat) +bool dataPage::append(dataTuple const * dat) { // First, decide if we should append to this datapage, based on whether // appending will waste more or less space than starting a new datapage @@ -344,21 +344,21 @@ bool DataPage::append(datatuple const * dat) return succ; } -bool DataPage::recordRead(const datatuple::key_t key, size_t keySize, datatuple ** buf) +bool dataPage::recordRead(const dataTuple::key_t key, size_t keySize, dataTuple ** buf) { iterator itr(this, NULL); int match = -1; while((*buf=itr.getnext()) != 0) { - match = datatuple::compare((*buf)->strippedkey(), (*buf)->strippedkeylen(), key, keySize); + match = dataTuple::compare((*buf)->strippedkey(), (*buf)->strippedkeylen(), key, keySize); if(match<0) { //keep searching - datatuple::freetuple(*buf); + 
dataTuple::freetuple(*buf); *buf=0; } else if(match==0) { //found return true; } else { // match > 0, then does not exist - datatuple::freetuple(*buf); + dataTuple::freetuple(*buf); *buf = 0; break; } @@ -371,7 +371,7 @@ bool DataPage::recordRead(const datatuple::key_t key, size_t keySize, datatuple /////////////////////////////////////////////////////////////// -datatuple* DataPage::iterator::getnext() { +dataTuple* dataPage::iterator::getnext() { len_t len; bool succ; if(dp == NULL) { return NULL; } @@ -398,7 +398,7 @@ datatuple* DataPage::iterator::getnext() { read_offset_ += len; - datatuple *ret = datatuple::from_bytes(buf); + dataTuple *ret = dataTuple::from_bytes(buf); free(buf); diff --git a/datapage.h b/datapage.h index 9bba4b4..3888eb9 100644 --- a/datapage.h +++ b/datapage.h @@ -17,37 +17,36 @@ * * Author: makdere */ -#ifndef _SIMPLE_DATA_PAGE_H_ -#define _SIMPLE_DATA_PAGE_H_ +#ifndef DATA_PAGE_H_ +#define DATA_PAGE_H_ #include #include #include #include "datatuple.h" - -struct RegionAllocator; +#include "regionAllocator.h" //#define CHECK_FOR_SCRIBBLING -class DataPage +class dataPage { public: class iterator { private: - void scan_to_key(datatuple * key) { + void scan_to_key(dataTuple * key) { if(key) { len_t old_off = read_offset_; - datatuple * t = getnext(); - while(t && datatuple::compare(key->strippedkey(), key->strippedkeylen(), t->strippedkey(), t->strippedkeylen()) > 0) { - datatuple::freetuple(t); + dataTuple * t = getnext(); + while(t && dataTuple::compare(key->strippedkey(), key->strippedkeylen(), t->strippedkey(), t->strippedkeylen()) > 0) { + dataTuple::freetuple(t); old_off = read_offset_; t = getnext(); } if(t) { DEBUG("datapage opened at %s\n", t->key()); - datatuple::freetuple(t); + dataTuple::freetuple(t); read_offset_ = old_off; } else { DEBUG("datapage key not found. 
Offset = %lld", read_offset_); @@ -56,7 +55,7 @@ public: } } public: - iterator(DataPage *dp, datatuple * key=NULL) : read_offset_(0), dp(dp) { + iterator(dataPage *dp, dataTuple * key=NULL) : read_offset_(0), dp(dp) { scan_to_key(key); } @@ -66,11 +65,11 @@ public: } //returns the next tuple and also advances the iterator - datatuple *getnext(); + dataTuple *getnext(); private: off_t read_offset_; - DataPage *dp; + dataPage *dp; }; public: @@ -78,12 +77,12 @@ public: /** * if alloc is non-null, then reads will be optimized for sequential access */ - DataPage( int xid, RegionAllocator* alloc, pageid_t pid ); + dataPage( int xid, regionAllocator* alloc, pageid_t pid ); //to be used to create new data pages - DataPage( int xid, pageid_t page_count, RegionAllocator* alloc); + dataPage( int xid, pageid_t page_count, regionAllocator* alloc); - ~DataPage() { + ~dataPage() { assert(write_offset_ == -1); } @@ -100,8 +99,8 @@ public: } - bool append(datatuple const * dat); - bool recordRead(const datatuple::key_t key, size_t keySize, datatuple ** buf); + bool append(dataTuple const * dat); + bool recordRead(const dataTuple::key_t key, size_t keySize, dataTuple ** buf); inline uint16_t recordCount(); @@ -150,7 +149,7 @@ private: int xid_; pageid_t page_count_; const pageid_t initial_page_count_; - RegionAllocator *alloc_; + regionAllocator *alloc_; const pageid_t first_page_; off_t write_offset_; // points to the next free byte (ignoring page boundaries) }; diff --git a/datatuple.h b/datatuple.h index ecbac64..ad02c66 100644 --- a/datatuple.h +++ b/datatuple.h @@ -29,7 +29,7 @@ typedef uint32_t len_t ; static const len_t DELETE = ((len_t)0) - 1; -typedef struct datatuple +typedef struct dataTuple { public: typedef unsigned char* key_t ; @@ -38,7 +38,7 @@ private: len_t datalen_; byte* data_; // aliases key(). data_ - 1 should be the \0 terminating key(). - datatuple* sanity_check() { + dataTuple* sanity_check() { assert(rawkeylen() < 3000); return this; } @@ -83,7 +83,7 @@ public: return (key_t)(this+1); } //this is used by the stl set - bool operator() (const datatuple* lhs, const datatuple* rhs) const { + bool operator() (const dataTuple* lhs, const dataTuple* rhs) const { return compare(lhs->strippedkey(), lhs->strippedkeylen(), rhs->strippedkey(), rhs->strippedkeylen()) < 0; //strcmp((char*)lhs.key(),(char*)rhs.key()) < 0; } @@ -136,7 +136,7 @@ public: return (int64_t)*(uint64_t*)(rawkey()+1+al-ts_sz); } - static int compare_obj(const datatuple * a, const datatuple* b) { + static int compare_obj(const dataTuple * a, const dataTuple* b) { return compare(a->strippedkey(), a->strippedkeylen(), b->strippedkey(), b->strippedkeylen()); } @@ -160,16 +160,16 @@ public: } //copy the tuple. does a deep copy of the contents. 
- datatuple* create_copy() const { + dataTuple* create_copy() const { return create(rawkey(), rawkeylen(), data(), datalen_); } - static datatuple* create(const void* key, len_t keylen) { + static dataTuple* create(const void* key, len_t keylen) { return create(key, keylen, 0, DELETE); } - static datatuple* create(const void* key, len_t keylen, const void* data, len_t datalen) { - datatuple *ret = (datatuple*)malloc(sizeof(datatuple) + length_from_header(keylen,datalen)); + static dataTuple* create(const void* key, len_t keylen, const void* data, len_t datalen) { + dataTuple *ret = (dataTuple*)malloc(sizeof(dataTuple) + length_from_header(keylen,datalen)); memcpy(ret->rawkey(), key, keylen); ret->data_ = ret->rawkey() + keylen; // need to set this even if delete, since it encodes the key length. if(datalen != DELETE) { @@ -195,17 +195,17 @@ public: } //format of buf: key _ data. The caller needs to 'peel' off key length and data length for this call. - static datatuple* from_bytes(len_t keylen, len_t datalen, byte* buf) { - datatuple *dt = (datatuple*) malloc(sizeof(datatuple) + length_from_header(keylen,datalen)); + static dataTuple* from_bytes(len_t keylen, len_t datalen, byte* buf) { + dataTuple *dt = (dataTuple*) malloc(sizeof(dataTuple) + length_from_header(keylen,datalen)); dt->datalen_ = datalen; memcpy(dt->rawkey(),buf, length_from_header(keylen,datalen)); dt->data_ = dt->rawkey() + keylen; return dt->sanity_check(); } - static datatuple* from_bytes(byte* buf) { + static dataTuple* from_bytes(byte* buf) { len_t keylen = ((len_t*)buf)[0]; len_t buflen = length_from_header(keylen, ((len_t*)buf)[1]); - datatuple *dt = (datatuple*) malloc(sizeof(datatuple) + buflen); + dataTuple *dt = (dataTuple*) malloc(sizeof(dataTuple) + buflen); dt->datalen_ = ((len_t*)buf)[1]; memcpy(dt->rawkey(),((len_t*)buf)+2,buflen); dt->data_ = dt->rawkey() + keylen; @@ -213,11 +213,11 @@ public: return dt->sanity_check(); } - static inline void freetuple(datatuple* dt) { + static inline void freetuple(dataTuple* dt) { free(dt); } -} datatuple; +} dataTuple; #endif diff --git a/diskTreeComponent.cpp b/diskTreeComponent.cpp index 9123e70..35e547e 100644 --- a/diskTreeComponent.cpp +++ b/diskTreeComponent.cpp @@ -76,7 +76,7 @@ void diskTreeComponent::writes_done() { } } -int diskTreeComponent::insertTuple(int xid, datatuple *t) +int diskTreeComponent::insertTuple(int xid, dataTuple *t) { if(bloom_filter) { bloom_filter_insert(bloom_filter, (const char*)t->strippedkey(), t->strippedkeylen()); @@ -96,14 +96,14 @@ int diskTreeComponent::insertTuple(int xid, datatuple *t) return ret; } -DataPage* diskTreeComponent::insertDataPage(int xid, datatuple *tuple) { +dataPage* diskTreeComponent::insertDataPage(int xid, dataTuple *tuple) { //create a new data page -- either the last region is full, or the last data page doesn't want our tuple. 
(or both) - DataPage * dp = 0; + dataPage * dp = 0; int count = 0; while(dp==0) { - dp = new DataPage(xid, datapage_size, ltree->get_datapage_alloc()); + dp = new dataPage(xid, datapage_size, ltree->get_datapage_alloc()); //insert the record into the data page if(!dp->append(tuple)) @@ -130,9 +130,9 @@ DataPage* diskTreeComponent::insertDataPage(int xid, datatuple *tuple) { return dp; } -datatuple * diskTreeComponent::findTuple(int xid, datatuple::key_t key, size_t keySize) +dataTuple * diskTreeComponent::findTuple(int xid, dataTuple::key_t key, size_t keySize) { - datatuple * tup=0; + dataTuple * tup=0; if(bloom_filter) { if(!bloom_filter_lookup(bloom_filter, (const char*)key, keySize)) { @@ -145,7 +145,7 @@ datatuple * diskTreeComponent::findTuple(int xid, datatuple::key_t key, size_t k if(pid!=-1) { - DataPage * dp = new DataPage(xid, 0, pid); + dataPage * dp = new dataPage(xid, 0, pid); dp->recordRead(key, keySize, &tup); delete dp; } @@ -194,7 +194,7 @@ recordid diskTreeComponent::internalNodes::create(int xid) { void diskTreeComponent::internalNodes::writeNodeRecord(int xid, Page * p, recordid & rid, const byte *key, size_t keylen, pageid_t ptr) { DEBUG("writenoderecord:\tp->id\t%lld\tkey:\t%s\tkeylen: %d\tval_page\t%lld\n", - p->id, datatuple::key_to_str(key).c_str(), keylen, ptr); + p->id, dataTuple::key_to_str(key).c_str(), keylen, ptr); indexnode_rec *nr = (indexnode_rec*)stasis_record_write_begin(xid, p, rid); nr->ptr = ptr; memcpy(nr+1, key, keylen); @@ -338,7 +338,7 @@ recordid diskTreeComponent::internalNodes::appendPage(int xid, } else { DEBUG("Appended new internal node tree depth = %lld key = %s\n", - depth, datatuple::key_to_str(key).c_str()); + depth, dataTuple::key_to_str(key).c_str()); } lastLeaf = ret.page; @@ -346,7 +346,7 @@ recordid diskTreeComponent::internalNodes::appendPage(int xid, } else { // write the new value to an existing page - DEBUG("Writing %s\t%d to existing page# %lld\n", datatuple::key_to_str(key).c_str(), + DEBUG("Writing %s\t%d to existing page# %lld\n", dataTuple::key_to_str(key).c_str(), val_page, lastLeafPage->id); stasis_record_alloc_done(xid, lastLeafPage, ret); @@ -368,15 +368,15 @@ recordid diskTreeComponent::internalNodes::appendPage(int xid, diskTreeComponent::internalNodes::internalNodes(int xid, pageid_t internal_region_size, pageid_t datapage_region_size, pageid_t datapage_size) : lastLeaf(-1), - internal_node_alloc(new RegionAllocator(xid, internal_region_size)), - datapage_alloc(new RegionAllocator(xid, datapage_region_size)) + internal_node_alloc(new regionAllocator(xid, internal_region_size)), + datapage_alloc(new regionAllocator(xid, datapage_region_size)) { create(xid); } diskTreeComponent::internalNodes::internalNodes(int xid, recordid root, recordid internal_node_state, recordid datapage_state) : lastLeaf(-1), root_rec(root), - internal_node_alloc(new RegionAllocator(xid, internal_node_state)), - datapage_alloc(new RegionAllocator(xid, datapage_state)) + internal_node_alloc(new regionAllocator(xid, internal_node_state)), + datapage_alloc(new regionAllocator(xid, datapage_state)) { } diskTreeComponent::internalNodes::~internalNodes() { @@ -646,8 +646,8 @@ recordid diskTreeComponent::internalNodes::lookup(int xid, rid.size = stasis_record_length_read(xid, node, rid); const indexnode_rec *rec = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid); - int cmpval = datatuple::compare((datatuple::key_t) (rec+1), rid.size-sizeof(*rec), - (datatuple::key_t) key, keySize); + int cmpval = dataTuple::compare((dataTuple::key_t) 
(rec+1), rid.size-sizeof(*rec), + (dataTuple::key_t) key, keySize); stasis_record_read_done(xid,node,rid,(const byte*)rec); // key of current node is too big; there can be no matches under it. @@ -716,7 +716,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t rid.slot = i; const indexnode_rec *nr = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid); printf("\tchild_page_id:%lld\tkey:%s\n", nr->ptr, - datatuple::key_to_str((byte*)(nr+1)).c_str()); + dataTuple::key_to_str((byte*)(nr+1)).c_str()); stasis_record_read_done(xid, node, rid, (const byte*)nr); } @@ -733,7 +733,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t rid.slot = FIRST_SLOT; const indexnode_rec *nr = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid); printf("\tdata_page_id:%lld\tkey:%s\n", nr->ptr, - datatuple::key_to_str((byte*)(nr+1)).c_str()); + dataTuple::key_to_str((byte*)(nr+1)).c_str()); stasis_record_read_done(xid, node, rid, (const byte*)nr); printf("\t...\n"); @@ -741,7 +741,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t rid.slot= numslots - 1; nr = (const indexnode_rec*)stasis_record_read_begin(xid,node,rid); printf("\tdata_page_id:%lld\tkey:%s\n", nr->ptr, - datatuple::key_to_str((byte*)(nr+1)).c_str()); + dataTuple::key_to_str((byte*)(nr+1)).c_str()); stasis_record_read_done(xid, node, rid, (const byte*)nr); } unlock(node->rwlatch); @@ -752,7 +752,7 @@ void diskTreeComponent::internalNodes::print_tree(int xid, pageid_t pid, int64_t //diskTreeComponentIterator implementation ///////////////////////////////////////////////// -diskTreeComponent::internalNodes::iterator::iterator(int xid, RegionAllocator* ro_alloc, recordid root) { +diskTreeComponent::internalNodes::iterator::iterator(int xid, regionAllocator* ro_alloc, recordid root) { ro_alloc_ = ro_alloc; if(root.page == 0 && root.slot == 0 && root.size == -1) abort(); p = ro_alloc_->load_page(xid,root.page); @@ -798,7 +798,7 @@ diskTreeComponent::internalNodes::iterator::iterator(int xid, RegionAllocator* r if(!justOnePage) readlock(p->rwlatch,0); } -diskTreeComponent::internalNodes::iterator::iterator(int xid, RegionAllocator* ro_alloc, recordid root, const byte* key, len_t keylen) { +diskTreeComponent::internalNodes::iterator::iterator(int xid, regionAllocator* ro_alloc, recordid root, const byte* key, len_t keylen) { if(root.page == NULLRID.page && root.slot == NULLRID.slot) abort(); ro_alloc_ = ro_alloc; p = ro_alloc_->load_page(xid,root.page); @@ -917,7 +917,7 @@ void diskTreeComponent::internalNodes::iterator::close() { // tree iterator implementation ///////////////////////////////////////////////////////////////////// -void diskTreeComponent::iterator::init_iterators(datatuple * key1, datatuple * key2) { +void diskTreeComponent::iterator::init_iterators(dataTuple * key1, dataTuple * key2) { assert(!key2); // unimplemented if(tree_.size == INVALID_SIZE) { lsmIterator_ = NULL; @@ -931,7 +931,7 @@ void diskTreeComponent::iterator::init_iterators(datatuple * key1, datatuple * k } diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, mergeManager * mgr, double target_progress_delta, bool * flushing) : - ro_alloc_(new RegionAllocator()), + ro_alloc_(new regionAllocator()), tree_(tree ? 
tree->get_root_rec() : NULLRID), mgr_(mgr), target_progress_delta_(target_progress_delta), @@ -941,8 +941,8 @@ diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, me init_helper(NULL); } -diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, datatuple* key) : - ro_alloc_(new RegionAllocator()), +diskTreeComponent::iterator::iterator(diskTreeComponent::internalNodes *tree, dataTuple* key) : + ro_alloc_(new regionAllocator()), tree_(tree ? tree->get_root_rec() : NULLRID), mgr_(NULL), target_progress_delta_(0.0), @@ -965,7 +965,7 @@ diskTreeComponent::iterator::~iterator() { delete ro_alloc_; } -void diskTreeComponent::iterator::init_helper(datatuple* key1) +void diskTreeComponent::iterator::init_helper(dataTuple* key1) { if(!lsmIterator_) { @@ -988,7 +988,7 @@ void diskTreeComponent::iterator::init_helper(datatuple* key1) lsmIterator_->value((byte**)hack); curr_pageid = *pid_tmp; - curr_page = new DataPage(-1, ro_alloc_, curr_pageid); + curr_page = new dataPage(-1, ro_alloc_, curr_pageid); DEBUG("opening datapage iterator %lld at key %s\n.", curr_pageid, key1 ? (char*)key1->key() : "NULL"); dp_itr = new DPITR_T(curr_page, key1); @@ -997,14 +997,14 @@ void diskTreeComponent::iterator::init_helper(datatuple* key1) } } -datatuple * diskTreeComponent::iterator::next_callerFrees() +dataTuple * diskTreeComponent::iterator::next_callerFrees() { if(!this->lsmIterator_) { return NULL; } if(dp_itr == 0) return 0; - datatuple* readTuple = dp_itr->getnext(); + dataTuple* readTuple = dp_itr->getnext(); if(!readTuple) @@ -1022,7 +1022,7 @@ datatuple * diskTreeComponent::iterator::next_callerFrees() size_t ret = lsmIterator_->value((byte**)hack); assert(ret == sizeof(pageid_t)); curr_pageid = *pid_tmp; - curr_page = new DataPage(-1, ro_alloc_, curr_pageid); + curr_page = new dataPage(-1, ro_alloc_, curr_pageid); DEBUG("opening datapage iterator %lld at beginning\n.", curr_pageid); dp_itr = new DPITR_T(curr_page->begin()); diff --git a/diskTreeComponent.h b/diskTreeComponent.h index 346179a..db15d21 100644 --- a/diskTreeComponent.h +++ b/diskTreeComponent.h @@ -74,15 +74,15 @@ class diskTreeComponent { recordid get_datapage_allocator_rid(); recordid get_internal_node_allocator_rid(); internalNodes * get_internal_nodes() { return ltree; } - datatuple* findTuple(int xid, datatuple::key_t key, size_t keySize); - int insertTuple(int xid, datatuple *t); + dataTuple* findTuple(int xid, dataTuple::key_t key, size_t keySize); + int insertTuple(int xid, dataTuple *t); void writes_done(); iterator * open_iterator(mergeManager * mgr = NULL, double target_size = 0, bool * flushing = NULL) { return new iterator(ltree, mgr, target_size, flushing); } - iterator * open_iterator(datatuple * key) { + iterator * open_iterator(dataTuple * key) { if(key != NULL) { return new iterator(ltree, key); } else { @@ -100,10 +100,10 @@ class diskTreeComponent { } private: - DataPage* insertDataPage(int xid, datatuple *tuple); + dataPage* insertDataPage(int xid, dataTuple *tuple); internalNodes * ltree; - DataPage* dp; + dataPage* dp; pageid_t datapage_size; /*mergeManager::mergeStats*/ void *stats; // XXX hack to work around circular includes. 
@@ -122,8 +122,8 @@ class diskTreeComponent { //appends a leaf page, val_page is the id of the leaf page recordid appendPage(int xid, const byte *key,size_t keySize, pageid_t val_page); - inline RegionAllocator* get_datapage_alloc() { return datapage_alloc; } - inline RegionAllocator* get_internal_node_alloc() { return internal_node_alloc; } + inline regionAllocator* get_datapage_alloc() { return datapage_alloc; } + inline regionAllocator* get_internal_node_alloc() { return internal_node_alloc; } const recordid &get_root_rec(){return root_rec;} private: @@ -168,8 +168,8 @@ class diskTreeComponent { void print_tree(int xid, pageid_t pid, int64_t depth); recordid root_rec; - RegionAllocator* internal_node_alloc; - RegionAllocator* datapage_alloc; + regionAllocator* internal_node_alloc; + regionAllocator* datapage_alloc; struct indexnode_rec { pageid_t ptr; @@ -178,8 +178,8 @@ class diskTreeComponent { public: class iterator { public: - iterator(int xid, RegionAllocator *ro_alloc, recordid root); - iterator(int xid, RegionAllocator *ro_alloc, recordid root, const byte* key, len_t keylen); + iterator(int xid, regionAllocator *ro_alloc, recordid root); + iterator(int xid, regionAllocator *ro_alloc, recordid root, const byte* key, len_t keylen); int next(); void close(); @@ -197,7 +197,7 @@ class diskTreeComponent { inline void releaseLock() { } private: - RegionAllocator * ro_alloc_; + regionAllocator * ro_alloc_; Page * p; int xid_; bool done; @@ -216,21 +216,21 @@ class diskTreeComponent { public: explicit iterator(diskTreeComponent::internalNodes *tree, mergeManager * mgr = NULL, double target_size = 0, bool * flushing = NULL); - explicit iterator(diskTreeComponent::internalNodes *tree,datatuple *key); + explicit iterator(diskTreeComponent::internalNodes *tree,dataTuple *key); ~iterator(); - datatuple * next_callerFrees(); + dataTuple * next_callerFrees(); private: - void init_iterators(datatuple * key1, datatuple * key2); - inline void init_helper(datatuple * key1); + void init_iterators(dataTuple * key1, dataTuple * key2); + inline void init_helper(dataTuple * key1); explicit iterator() { abort(); } void operator=(iterator & t) { abort(); } int operator-(iterator & t) { abort(); } - RegionAllocator * ro_alloc_; // has a filehandle that we use to optimize sequential scans. + regionAllocator * ro_alloc_; // has a filehandle that we use to optimize sequential scans. 
recordid tree_; //root of the tree mergeManager * mgr_; double target_progress_delta_; @@ -239,8 +239,8 @@ class diskTreeComponent { diskTreeComponent::internalNodes::iterator* lsmIterator_; pageid_t curr_pageid; //current page id - DataPage *curr_page; //current page - typedef DataPage::iterator DPITR_T; + dataPage *curr_page; //current page + typedef dataPage::iterator DPITR_T; DPITR_T *dp_itr; }; diff --git a/memTreeComponent.cpp b/memTreeComponent.cpp index e8d4daf..faef714 100644 --- a/memTreeComponent.cpp +++ b/memTreeComponent.cpp @@ -20,14 +20,14 @@ #include "datatuple.h" void memTreeComponent::tearDownTree(rbtree_ptr_t tree) { - datatuple * t = 0; + dataTuple * t = 0; rbtree_t::iterator old; for(rbtree_t::iterator delitr = tree->begin(); delitr != tree->end(); delitr++) { if(t) { tree->erase(old); - datatuple::freetuple(t); + dataTuple::freetuple(t); t = 0; } t = *delitr; @@ -35,7 +35,7 @@ void memTreeComponent::tearDownTree(rbtree_ptr_t tree) { } if(t) { tree->erase(old); - datatuple::freetuple(t); + dataTuple::freetuple(t); } delete tree; } diff --git a/memTreeComponent.h b/memTreeComponent.h index 0b0a669..5c68354 100644 --- a/memTreeComponent.h +++ b/memTreeComponent.h @@ -26,7 +26,7 @@ class memTreeComponent { public: // typedef std::set > rbtree_t; - typedef std::set rbtree_t; + typedef std::set rbtree_t; typedef rbtree_t* rbtree_ptr_t; static void tearDownTree(rbtree_ptr_t t); @@ -47,7 +47,7 @@ public: init_iterators(s, NULL, NULL); } - iterator( rbtree_t *s, datatuple *&key ) + iterator( rbtree_t *s, dataTuple *&key ) : first_(true), done_(s == NULL) { init_iterators(s, key, NULL); } @@ -57,7 +57,7 @@ public: delete itend_; } - datatuple* next_callerFrees() { + dataTuple* next_callerFrees() { if(done_) { return NULL; } if(first_) { first_ = 0;} else { (*it_)++; } if(*it_==*itend_) { done_= true; return NULL; } @@ -67,7 +67,7 @@ public: private: - void init_iterators(rbtree_t * s, datatuple * key1, datatuple * key2) { + void init_iterators(rbtree_t * s, dataTuple * key1, dataTuple * key2) { if(s) { it_ = key1 ? new MTITER(s->lower_bound(key1)) : new MTITER(s->begin()); itend_ = key2 ? 
new MTITER(s->upper_bound(key2)) : new MTITER(s->end()); @@ -114,7 +114,7 @@ public: } if(mut_) pthread_mutex_unlock(mut_); } - revalidatingIterator( rbtree_t *s, pthread_mutex_t * rb_mut, datatuple *&key ) : s_(s), mut_(rb_mut) { + revalidatingIterator( rbtree_t *s, pthread_mutex_t * rb_mut, dataTuple *&key ) : s_(s), mut_(rb_mut) { if(mut_) pthread_mutex_lock(mut_); if(key) { if(s_->find(key) != s_->end()) { @@ -136,12 +136,12 @@ public: } ~revalidatingIterator() { - if(next_ret_) datatuple::freetuple(next_ret_); + if(next_ret_) dataTuple::freetuple(next_ret_); } - datatuple* next_callerFrees() { + dataTuple* next_callerFrees() { if(mut_) pthread_mutex_lock(mut_); - datatuple * ret = next_ret_; + dataTuple * ret = next_ret_; if(next_ret_) { if(s_->upper_bound(next_ret_) == s_->end()) { next_ret_ = 0; @@ -159,7 +159,7 @@ public: int operator-(revalidatingIterator & t) { abort(); } rbtree_t *s_; - datatuple * next_ret_; + dataTuple * next_ret_; pthread_mutex_t * mut_; }; @@ -174,7 +174,7 @@ public: typedef rbtree_t::const_iterator MTITER; - void populate_next_ret_impl(std::_Rb_tree_const_iterator/*MTITER*/ it) { + void populate_next_ret_impl(std::_Rb_tree_const_iterator/*MTITER*/ it) { num_batched_ = 0; cur_off_ = 0; while(it != s_->end() && num_batched_ < batch_size_) { @@ -183,7 +183,7 @@ public: it++; } } - void populate_next_ret(datatuple *key=NULL, bool include_key=false) { + void populate_next_ret(dataTuple *key=NULL, bool include_key=false) { if(cur_off_ == num_batched_) { if(mut_) pthread_mutex_lock(mut_); if(mgr_) { @@ -206,24 +206,24 @@ public: public: batchedRevalidatingIterator( rbtree_t *s, mergeManager * mgr, int64_t target_size, bool * flushing, int batch_size, pthread_mutex_t * rb_mut ) : s_(s), mgr_(mgr), target_size_(target_size), flushing_(flushing), batch_size_(batch_size), num_batched_(batch_size), cur_off_(batch_size), mut_(rb_mut) { - next_ret_ = (datatuple**)malloc(sizeof(next_ret_[0]) * batch_size_); + next_ret_ = (dataTuple**)malloc(sizeof(next_ret_[0]) * batch_size_); populate_next_ret(); } - batchedRevalidatingIterator( rbtree_t *s, int batch_size, pthread_mutex_t * rb_mut, datatuple *&key ) : s_(s), mgr_(NULL), target_size_(0), flushing_(0), batch_size_(batch_size), num_batched_(batch_size), cur_off_(batch_size), mut_(rb_mut) { - next_ret_ = (datatuple**)malloc(sizeof(next_ret_[0]) * batch_size_); + batchedRevalidatingIterator( rbtree_t *s, int batch_size, pthread_mutex_t * rb_mut, dataTuple *&key ) : s_(s), mgr_(NULL), target_size_(0), flushing_(0), batch_size_(batch_size), num_batched_(batch_size), cur_off_(batch_size), mut_(rb_mut) { + next_ret_ = (dataTuple**)malloc(sizeof(next_ret_[0]) * batch_size_); populate_next_ret(key, true); } ~batchedRevalidatingIterator() { for(int i = cur_off_; i < num_batched_; i++) { - datatuple::freetuple(next_ret_[i]); + dataTuple::freetuple(next_ret_[i]); } free(next_ret_); } - datatuple* next_callerFrees() { + dataTuple* next_callerFrees() { if(cur_off_ == num_batched_) { return NULL; } // the last thing we did is call populate_next_ret_(), which only leaves us in this state at the end of the iterator. - datatuple * ret = next_ret_[cur_off_]; + dataTuple * ret = next_ret_[cur_off_]; cur_off_++; populate_next_ret(ret); return ret; @@ -235,7 +235,7 @@ public: int operator-(batchedRevalidatingIterator & t) { abort(); } rbtree_t *s_; - datatuple ** next_ret_; + dataTuple ** next_ret_; mergeManager * mgr_; int64_t target_size_; // the low-water size for the tree. 
If cur_size_ is not null, and *cur_size_ < C * target_size_, we sleep. bool* flushing_; // never block if *flushing is true. diff --git a/mergeManager.cpp b/mergeManager.cpp index 233209d..632260e 100644 --- a/mergeManager.cpp +++ b/mergeManager.cpp @@ -425,7 +425,7 @@ void mergeManager::tick(mergeStats * s) { } } -void mergeManager::read_tuple_from_small_component(int merge_level, datatuple * tup) { +void mergeManager::read_tuple_from_small_component(int merge_level, dataTuple * tup) { if(tup) { mergeStats * s = get_merge_stats(merge_level); (s->num_tuples_in_small)++; @@ -450,7 +450,7 @@ void mergeManager::read_tuple_from_large_component(int merge_level, int tuple_co } } -void mergeManager::wrote_tuple(int merge_level, datatuple * tup) { +void mergeManager::wrote_tuple(int merge_level, dataTuple * tup) { mergeStats * s = get_merge_stats(merge_level); (s->num_tuples_out)++; (s->bytes_out) += tup->byte_length(); @@ -543,7 +543,7 @@ void mergeManager::init_helper(void) { pthread_create(&update_progress_pthread, 0, merge_manager_update_progress_thread, (void*)this); } -mergeManager::mergeManager(blsm *ltable): +mergeManager::mergeManager(bLSM *ltable): UPDATE_PROGRESS_PERIOD(0.005), ltable(ltable) { c0 = new mergeStats(0, ltable ? ltable->max_c0_size : 10000000); @@ -551,7 +551,7 @@ mergeManager::mergeManager(blsm *ltable): c2 = new mergeStats(2, 0); init_helper(); } -mergeManager::mergeManager(blsm *ltable, int xid, recordid rid): +mergeManager::mergeManager(bLSM *ltable, int xid, recordid rid): UPDATE_PROGRESS_PERIOD(0.005), ltable(ltable) { marshalled_header h; @@ -581,7 +581,7 @@ void mergeManager::marshal(int xid, recordid rid) { void mergeManager::pretty_print(FILE * out) { #if EXTENDED_STATS - blsm * lt = ltable; + bLSM * lt = ltable; bool have_c0 = false; bool have_c0m = false; bool have_c1 = false; diff --git a/mergeManager.h b/mergeManager.h index da0ebad..b453c18 100644 --- a/mergeManager.h +++ b/mergeManager.h @@ -27,7 +27,7 @@ #include #include -class blsm; +class bLSM; class mergeStats; class mergeManager { @@ -48,8 +48,8 @@ public: uint64_t long_tv(struct timeval& tv) { return (1000000ULL * (uint64_t)tv.tv_sec) + ((uint64_t)tv.tv_usec); } - mergeManager(blsm *ltable); - mergeManager(blsm *ltable, int xid, recordid rid); + mergeManager(bLSM *ltable); + mergeManager(bLSM *ltable, int xid, recordid rid); void marshal(int xid, recordid rid); recordid talloc(int xid); ~mergeManager(); @@ -62,14 +62,14 @@ public: void tick(mergeStats * s); mergeStats* get_merge_stats(int mergeLevel); - void read_tuple_from_small_component(int merge_level, datatuple * tup); - void read_tuple_from_large_component(int merge_level, datatuple * tup) { + void read_tuple_from_small_component(int merge_level, dataTuple * tup); + void read_tuple_from_large_component(int merge_level, dataTuple * tup) { if(tup) read_tuple_from_large_component(merge_level, 1, tup->byte_length()); } void read_tuple_from_large_component(int merge_level, int tuple_count, pageid_t byte_len); - void wrote_tuple(int merge_level, datatuple * tup); + void wrote_tuple(int merge_level, dataTuple * tup); void pretty_print(FILE * out); void *pretty_print_thread(); void *update_progress_thread(); @@ -106,7 +106,7 @@ private: * * TODO: remove mergeManager->ltable? */ - blsm* ltable; + bLSM* ltable; mergeStats * c0; /// Per-tree component statistics for c0 and c0_mergeable (the latter should always be null...) mergeStats * c1; /// Per-tree component statistics for c1 and c1_mergeable. mergeStats * c2; /// Per-tree component statistics for c2. 
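Note: the mergeManager hunks above change only type names; the accounting hooks keep their shapes. As a reference point, a minimal caller-side sketch under the new names follows. It uses only signatures that appear in this patch (dataTuple::create/freetuple, read_tuple_from_small_component, wrote_tuple); the helper function itself, its parameter types, and the tuple contents are invented for illustration, and in merger.cpp these hooks are actually invoked as tuples stream through a merge rather than back-to-back like this.

#include <stdint.h>
#include "datatuple.h"     // class datatuple is now dataTuple
#include "mergeManager.h"  // constructors now take bLSM* instead of blsm*

// Hypothetical helper: account for one tuple entering and leaving a merge level.
static void account_tuple(mergeManager *mgr, int merge_level,
                          byte *key, uint32_t keylen,
                          byte *val, uint32_t vallen) {
    dataTuple *t = dataTuple::create(key, keylen, val, vallen); // was datatuple::create
    mgr->read_tuple_from_small_component(merge_level, t);       // input-side bookkeeping
    mgr->wrote_tuple(merge_level, t);                           // output-side bookkeeping
    dataTuple::freetuple(t);                                    // was datatuple::freetuple
}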
diff --git a/mergeStats.h b/mergeStats.h index 959113b..2aa0eb3 100644 --- a/mergeStats.h +++ b/mergeStats.h @@ -182,9 +182,9 @@ class mergeStats { just_handed_off = true; } } - void merged_tuples(datatuple * merged, datatuple * small, datatuple * large) { + void merged_tuples(dataTuple * merged, dataTuple * small, dataTuple * large) { } - void wrote_datapage(DataPage *dp) { + void wrote_datapage(dataPage *dp) { #if EXTENDED_STATS stats_num_datapages_out++; stats_bytes_out_with_overhead += (PAGE_SIZE * dp->get_page_count()); diff --git a/merger.cpp b/merger.cpp index fe48d9f..6317bb2 100644 --- a/merger.cpp +++ b/merger.cpp @@ -22,27 +22,27 @@ #include static void* memMerge_thr(void* arg) { - return ((merge_scheduler*)arg)->memMergeThread(); + return ((mergeScheduler*)arg)->memMergeThread(); } static void* diskMerge_thr(void* arg) { - return ((merge_scheduler*)arg)->diskMergeThread(); + return ((mergeScheduler*)arg)->diskMergeThread(); } -merge_scheduler::merge_scheduler(blsm *ltable) : ltable_(ltable), MIN_R(3.0) { } -merge_scheduler::~merge_scheduler() { } +mergeScheduler::mergeScheduler(bLSM *ltable) : ltable_(ltable), MIN_R(3.0) { } +mergeScheduler::~mergeScheduler() { } -void merge_scheduler::shutdown() { +void mergeScheduler::shutdown() { ltable_->stop(); pthread_join(mem_merge_thread_, 0); pthread_join(disk_merge_thread_, 0); } -void merge_scheduler::start() { +void mergeScheduler::start() { pthread_create(&mem_merge_thread_, 0, memMerge_thr, this); pthread_create(&disk_merge_thread_, 0, diskMerge_thr, this); } -bool insert_filter(blsm * ltable, datatuple * t, bool dropDeletes) { +bool insert_filter(bLSM * ltable, dataTuple * t, bool dropDeletes) { if(t->isDelete()) { if(dropDeletes || ! ltable->mightBeAfterMemMerge(t)) { return false; @@ -57,7 +57,7 @@ template void merge_iterators(int xid, diskTreeComponent * forceMe, ITA *itrA, ITB *itrB, - blsm *ltable, + bLSM *ltable, diskTreeComponent *scratch_tree, mergeStats * stats, bool dropDeletes); @@ -84,7 +84,7 @@ void merge_iterators(int xid, diskTreeComponent * forceMe, Merge algorithm: actual order: 1 2 3 4 5 6 12 11.5 11 [7 8 (9) 10] 13 */ -void * merge_scheduler::memMergeThread() { +void * mergeScheduler::memMergeThread() { int xid; @@ -240,7 +240,7 @@ void * merge_scheduler::memMergeThread() { } -void * merge_scheduler::diskMergeThread() +void * mergeScheduler::diskMergeThread() { int xid; @@ -355,11 +355,11 @@ static void periodically_force(int xid, int *i, diskTreeComponent * forceMe, sta } } -static int garbage_collect(blsm * ltable_, datatuple ** garbage, int garbage_len, int next_garbage, bool force = false) { +static int garbage_collect(bLSM * ltable_, dataTuple ** garbage, int garbage_len, int next_garbage, bool force = false) { if(next_garbage == garbage_len || force) { pthread_mutex_lock(<able_->rb_mut); for(int i = 0; i < next_garbage; i++) { - datatuple * t2tmp = NULL; + dataTuple * t2tmp = NULL; { memTreeComponent::rbtree_t::iterator rbitr = ltable_->get_tree_c0()->find(garbage[i]); if(rbitr != ltable_->get_tree_c0()->end()) { @@ -375,9 +375,9 @@ static int garbage_collect(blsm * ltable_, datatuple ** garbage, int garbage_len if(t2tmp) { ltable_->get_tree_c0()->erase(garbage[i]); //ltable_->merge_mgr->get_merge_stats(0)->current_size -= garbage[i]->byte_length(); - datatuple::freetuple(t2tmp); + dataTuple::freetuple(t2tmp); } - datatuple::freetuple(garbage[i]); + dataTuple::freetuple(garbage[i]); } pthread_mutex_unlock(<able_->rb_mut); return 0; @@ -391,20 +391,20 @@ void merge_iterators(int xid, diskTreeComponent * 
forceMe, ITA *itrA, //iterator on c1 or c2 ITB *itrB, //iterator on c0 or c1, respectively - blsm *ltable, + bLSM *ltable, diskTreeComponent *scratch_tree, mergeStats * stats, bool dropDeletes // should be true iff this is biggest component ) { stasis_log_t * log = (stasis_log_t*)stasis_log(); - datatuple *t1 = itrA->next_callerFrees(); + dataTuple *t1 = itrA->next_callerFrees(); ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1); - datatuple *t2 = 0; + dataTuple *t2 = 0; int garbage_len = 100; int next_garbage = 0; - datatuple ** garbage = (datatuple**)malloc(sizeof(garbage[0]) * garbage_len); + dataTuple ** garbage = (dataTuple**)malloc(sizeof(garbage[0]) * garbage_len); int i = 0; @@ -415,7 +415,7 @@ void merge_iterators(int xid, DEBUG("tuple\t%lld: keylen %d datalen %d\n", ntuples, *(t2->keylen),*(t2->datalen) ); - while(t1 != 0 && datatuple::compare(t1->rawkey(), t1->rawkeylen(), t2->rawkey(), t2->rawkeylen()) < 0) // t1 is less than t2 + while(t1 != 0 && dataTuple::compare(t1->rawkey(), t1->rawkeylen(), t2->rawkey(), t2->rawkeylen()) < 0) // t1 is less than t2 { //insert t1 if(insert_filter(ltable, t1, dropDeletes)) { @@ -423,7 +423,7 @@ void merge_iterators(int xid, i+=t1->byte_length(); ltable->merge_mgr->wrote_tuple(stats->merge_level, t1); } - datatuple::freetuple(t1); + dataTuple::freetuple(t1); //advance itrA t1 = itrA->next_callerFrees(); @@ -432,9 +432,9 @@ void merge_iterators(int xid, periodically_force(xid, &i, forceMe, log); } - if(t1 != 0 && datatuple::compare(t1->strippedkey(), t1->strippedkeylen(), t2->strippedkey(), t2->strippedkeylen()) == 0) + if(t1 != 0 && dataTuple::compare(t1->strippedkey(), t1->strippedkeylen(), t2->strippedkey(), t2->strippedkeylen()) == 0) { - datatuple *mtuple = ltable->gettuplemerger()->merge(t1,t2); + dataTuple *mtuple = ltable->gettuplemerger()->merge(t1,t2); stats->merged_tuples(mtuple, t2, t1); // this looks backwards, but is right. //insert merged tuple, drop deletes @@ -443,10 +443,10 @@ void merge_iterators(int xid, i+=mtuple->byte_length(); ltable->merge_mgr->wrote_tuple(stats->merge_level, mtuple); } - datatuple::freetuple(t1); + dataTuple::freetuple(t1); t1 = itrA->next_callerFrees(); //advance itrA ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1); - datatuple::freetuple(mtuple); + dataTuple::freetuple(mtuple); periodically_force(xid, &i, forceMe, log); } else @@ -469,7 +469,7 @@ void merge_iterators(int xid, next_garbage++; } if(stats->merge_level != 1) { - datatuple::freetuple(t2); + dataTuple::freetuple(t2); } } @@ -480,7 +480,7 @@ void merge_iterators(int xid, ltable->merge_mgr->wrote_tuple(stats->merge_level, t1); i += t1->byte_length(); } - datatuple::freetuple(t1); + dataTuple::freetuple(t1); //advance itrA t1 = itrA->next_callerFrees(); diff --git a/merger.h b/merger.h index 81f28c7..c7750ee 100644 --- a/merger.h +++ b/merger.h @@ -23,10 +23,10 @@ #include -class merge_scheduler { +class mergeScheduler { public: - merge_scheduler(blsm * ltable); - ~merge_scheduler(); + mergeScheduler(bLSM * ltable); + ~mergeScheduler(); void start(); void shutdown(); @@ -37,7 +37,7 @@ public: private: pthread_t mem_merge_thread_; pthread_t disk_merge_thread_; - blsm * ltable_; + bLSM * ltable_; const double MIN_R; }; diff --git a/regionAllocator.h b/regionAllocator.h index 782ea1e..753a105 100644 --- a/regionAllocator.h +++ b/regionAllocator.h @@ -24,12 +24,12 @@ #include -class RegionAllocator +class regionAllocator { public: // Open an existing region allocator. 
- RegionAllocator(int xid, recordid rid) : + regionAllocator(int xid, recordid rid) : nextPage_(INVALID_PAGE), endOfRegion_(INVALID_PAGE), bm_((stasis_buffer_manager_t*)stasis_runtime_buffer_manager()), @@ -39,7 +39,7 @@ public: regionCount_ = TarrayListLength(xid, header_.region_list); } // Create a new region allocator. - RegionAllocator(int xid, pageid_t region_page_count) : + regionAllocator(int xid, pageid_t region_page_count) : nextPage_(0), endOfRegion_(0), regionCount_(0), @@ -51,7 +51,7 @@ public: header_.region_page_count = region_page_count; Tset(xid, rid_, &header_); } - explicit RegionAllocator() : + explicit regionAllocator() : nextPage_(INVALID_PAGE), endOfRegion_(INVALID_PAGE), bm_((stasis_buffer_manager_t*)stasis_runtime_buffer_manager()), @@ -59,7 +59,7 @@ public: rid_.page = INVALID_PAGE; regionCount_ = -1; } - ~RegionAllocator() { + ~regionAllocator() { bm_->closeHandleImpl(bm_, bmh_); } Page * load_page(int xid, pageid_t p) { return bm_->loadPageImpl(bm_, bmh_, xid, p, UNKNOWN_TYPE_PAGE); } diff --git a/servers/mapkeeper/blsmRequestHandler.cpp b/servers/mapkeeper/blsmRequestHandler.cpp index e965f7b..5eee448 100644 --- a/servers/mapkeeper/blsmRequestHandler.cpp +++ b/servers/mapkeeper/blsmRequestHandler.cpp @@ -89,14 +89,14 @@ LSMServerHandler(int argc, char **argv) } pthread_mutex_init(&mutex_, 0); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); recordid table_root = ROOT_RECORD; { - ltable_ = new blsm(log_mode, c0_size); + ltable_ = new bLSM(log_mode, c0_size); ltable_->expiry = expiry_delta; if(TrecordType(xid, ROOT_RECORD) == INVALID_SLOT) { @@ -111,7 +111,7 @@ LSMServerHandler(int argc, char **argv) } Tcommit(xid); - merge_scheduler * mscheduler = new merge_scheduler(ltable_); + mergeScheduler * mscheduler = new mergeScheduler(ltable_); mscheduler->start(); ltable_->replayLog(); @@ -133,21 +133,21 @@ initNextDatabaseId() { nextDatabaseId_ = 1; uint32_t id = 0; - datatuple* start = buildTuple(id, ""); - datatuple* end = buildTuple(id + 1, ""); - blsm::iterator* itr = new blsm::iterator(ltable_, start); - datatuple* current; + dataTuple* start = buildTuple(id, ""); + dataTuple* end = buildTuple(id + 1, ""); + bLSM::iterator* itr = new bLSM::iterator(ltable_, start); + dataTuple* current; while ((current = itr->getnext())) { // are we at the end of range? 
- if (datatuple::compare_obj(current, end) >= 0) { - datatuple::freetuple(current); + if (dataTuple::compare_obj(current, end) >= 0) { + dataTuple::freetuple(current); break; } uint32_t currentId = *((uint32_t*)(current->data())); if (currentId > nextDatabaseId_) { nextDatabaseId_ = currentId; } - datatuple::freetuple(current); + dataTuple::freetuple(current); } nextDatabaseId_++; delete itr; @@ -178,15 +178,15 @@ shutdown() exit(0); // xxx hack return mapkeeper::ResponseCode::Success; } -std::string pp_tuple(datatuple * tuple) { +std::string pp_tuple(dataTuple * tuple) { std::string key((const char*)tuple->rawkey(), (size_t)tuple->rawkeylen()); return key; } ResponseCode::type LSMServerHandler:: -insert(datatuple* tuple) +insert(dataTuple* tuple) { ltable_->insertTuple(tuple); - datatuple::freetuple(tuple); + dataTuple::freetuple(tuple); return mapkeeper::ResponseCode::Success; } @@ -194,10 +194,10 @@ ResponseCode::type LSMServerHandler:: addMap(const std::string& databaseName) { uint32_t id = nextDatabaseId(); - datatuple* tup = buildTuple(0, databaseName, (void*)&id, (uint32_t)(sizeof(id))); - datatuple* ret = get(tup); + dataTuple* tup = buildTuple(0, databaseName, (void*)&id, (uint32_t)(sizeof(id))); + dataTuple* ret = get(tup); if (ret) { - datatuple::freetuple(ret); + dataTuple::freetuple(ret); if(trace) { fprintf(trace, "MapExists = addMap(%s)\n", databaseName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::MapExists; } @@ -213,35 +213,35 @@ dropMap(const std::string& databaseName) if(trace) { fprintf(trace, "MapNotFound = dropMap(%s)\n", databaseName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::MapNotFound; } - datatuple * tup = buildTuple(0, databaseName); - datatuple * exists = get(tup); + dataTuple * tup = buildTuple(0, databaseName); + dataTuple * exists = get(tup); if(exists) { - datatuple::freetuple(exists); + dataTuple::freetuple(exists); - datatuple * startKey = buildTuple(id, ""); - blsm::iterator * itr = new blsm::iterator(ltable_, startKey); - datatuple::freetuple(startKey); - datatuple * current; + dataTuple * startKey = buildTuple(id, ""); + bLSM::iterator * itr = new bLSM::iterator(ltable_, startKey); + dataTuple::freetuple(startKey); + dataTuple * current; // insert tombstone; deletes metadata entry for map; frees tup insert(tup); while(NULL != (current = itr->getnext())) { if(*((uint32_t*)current->strippedkey()) != id) { - datatuple::freetuple(current); + dataTuple::freetuple(current); break; } - datatuple * del = datatuple::create(current->strippedkey(), current->strippedkeylen()); + dataTuple * del = dataTuple::create(current->strippedkey(), current->strippedkeylen()); ltable_->insertTuple(del); - datatuple::freetuple(del); - datatuple::freetuple(current); + dataTuple::freetuple(del); + dataTuple::freetuple(current); } delete itr; if(trace) { fprintf(trace, "Success = dropMap(%s)\n", databaseName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::Success; } else { - datatuple::freetuple(tup); + dataTuple::freetuple(tup); if(trace) { fprintf(trace, "MapNotFound = dropMap(%s)\n", databaseName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::MapNotFound; } @@ -250,19 +250,19 @@ dropMap(const std::string& databaseName) void LSMServerHandler:: listMaps(StringListResponse& _return) { - datatuple * startKey = buildTuple(0, ""); - blsm::iterator * itr = new blsm::iterator(ltable_, startKey); - datatuple::freetuple(startKey); - datatuple * current; + dataTuple * startKey = buildTuple(0, ""); + bLSM::iterator * itr = new 
bLSM::iterator(ltable_, startKey); + dataTuple::freetuple(startKey); + dataTuple * current; while(NULL != (current = itr->getnext())) { if(*((uint32_t*)current->strippedkey()) != 0) { - datatuple::freetuple(current); + dataTuple::freetuple(current); break; } _return.values.push_back( std::string((char*)(current->strippedkey()) + sizeof(uint32_t), current->strippedkeylen() - sizeof(uint32_t))); - datatuple::freetuple(current); + dataTuple::freetuple(current); } delete itr; if(trace) { fprintf(trace, "... = listMaps()\n"); fflush(trace); } @@ -283,37 +283,37 @@ scan(RecordListResponse& _return, const std::string& databaseName, const ScanOrd return; } - datatuple* start = buildTuple(id, startKey); - datatuple* end; + dataTuple* start = buildTuple(id, startKey); + dataTuple* end; if (endKey.empty()) { end = buildTuple(id + 1, endKey); } else { end = buildTuple(id, endKey); } - blsm::iterator* itr = new blsm::iterator(ltable_, start); + bLSM::iterator* itr = new bLSM::iterator(ltable_, start); int32_t resultSize = 0; while ((maxRecords == 0 || (int32_t)(_return.records.size()) < maxRecords) && (maxBytes == 0 || resultSize < maxBytes)) { - datatuple* current = itr->getnext(); + dataTuple* current = itr->getnext(); if (current == NULL) { _return.responseCode = mapkeeper::ResponseCode::ScanEnded; if(trace) { fprintf(trace, "ScanEnded = scan(...)\n"); fflush(trace); } break; } - int cmp = datatuple::compare_obj(current, start); + int cmp = dataTuple::compare_obj(current, start); if ((!startKeyIncluded) && cmp == 0) { - datatuple::freetuple(current); + dataTuple::freetuple(current); continue; } // are we at the end of range? - cmp = datatuple::compare_obj(current, end); + cmp = dataTuple::compare_obj(current, end); if ((!endKeyIncluded && cmp >= 0) || (endKeyIncluded && cmp > 0)) { - datatuple::freetuple(current); + dataTuple::freetuple(current); _return.responseCode = mapkeeper::ResponseCode::ScanEnded; if(trace) { fprintf(trace, "ScanEnded = scan(...)\n"); fflush(trace); } break; @@ -327,16 +327,16 @@ scan(RecordListResponse& _return, const std::string& databaseName, const ScanOrd rec.value.assign((char*)(current->data()), dataSize); _return.records.push_back(rec); resultSize += keySize + dataSize; - datatuple::freetuple(current); + dataTuple::freetuple(current); } delete itr; } -datatuple* LSMServerHandler:: -get(datatuple* tuple) +dataTuple* LSMServerHandler:: +get(dataTuple* tuple) { // -1 is invalid txn id - datatuple* tup = ltable_->findTuple_first(-1, tuple->rawkey(), tuple->rawkeylen()); + dataTuple* tup = ltable_->findTuple_first(-1, tuple->rawkey(), tuple->rawkeylen()); return tup; } @@ -351,7 +351,7 @@ get(BinaryResponse& _return, const std::string& databaseName, const std::string& return; } - datatuple* recordBody = get(id, recordName); + dataTuple* recordBody = get(id, recordName); if (recordBody == NULL) { // record not found if(trace) { fprintf(trace, "RecordNotFound = get(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } @@ -361,32 +361,32 @@ get(BinaryResponse& _return, const std::string& databaseName, const std::string& if(trace) { fprintf(trace, "Success = get(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } _return.responseCode = mapkeeper::ResponseCode::Success; _return.value.assign((const char*)(recordBody->data()), recordBody->datalen()); - datatuple::freetuple(recordBody); + dataTuple::freetuple(recordBody); } uint32_t LSMServerHandler:: getDatabaseId(const std::string& databaseName) { - datatuple* tup = buildTuple(0, 
databaseName); - datatuple* databaseId = get(tup); - datatuple::freetuple(tup); + dataTuple* tup = buildTuple(0, databaseName); + dataTuple* databaseId = get(tup); + dataTuple::freetuple(tup); if (databaseId == NULL) { // database not found std::cout << "db not found" << std::endl; return 0; } uint32_t id = *((uint32_t*)(databaseId->data())); - datatuple::freetuple(databaseId); + dataTuple::freetuple(databaseId); return id; } -datatuple* LSMServerHandler:: +dataTuple* LSMServerHandler:: get(uint32_t databaseId, const std::string& recordName) { - datatuple* recordKey = buildTuple(databaseId, recordName); - datatuple* ret = get(recordKey); - datatuple::freetuple(recordKey); + dataTuple* recordKey = buildTuple(databaseId, recordName); + dataTuple* ret = get(recordKey); + dataTuple::freetuple(recordKey); return ret; } @@ -400,7 +400,7 @@ put(const std::string& databaseName, if(trace) { fprintf(trace, "MapNotFound = put(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::MapNotFound; } - datatuple* tup = buildTuple(id, recordName, recordBody); + dataTuple* tup = buildTuple(id, recordName, recordBody); if(trace) { fprintf(trace, "Success = put(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return insert(tup); } @@ -416,19 +416,19 @@ insert(const std::string& databaseName, return mapkeeper::ResponseCode::MapNotFound; } if(!blind_update) { - datatuple* oldRecordBody = get(id, recordName); + dataTuple* oldRecordBody = get(id, recordName); if (oldRecordBody != NULL) { if(oldRecordBody->isDelete()) { - datatuple::freetuple(oldRecordBody); + dataTuple::freetuple(oldRecordBody); } else { - datatuple::freetuple(oldRecordBody); + dataTuple::freetuple(oldRecordBody); if(trace) { fprintf(trace, "RecordExists = insert(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::RecordExists; } } } - datatuple* tup = buildTuple(id, recordName, recordBody); + dataTuple* tup = buildTuple(id, recordName, recordBody); if(trace) { fprintf(trace, "Success = insert(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return insert(tup); } @@ -451,14 +451,14 @@ update(const std::string& databaseName, return mapkeeper::ResponseCode::MapNotFound; } if(!blind_update) { - datatuple* oldRecordBody = get(id, recordName); + dataTuple* oldRecordBody = get(id, recordName); if (oldRecordBody == NULL) { if(trace) { fprintf(trace, "RecordNotFound = update(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::RecordNotFound; } - datatuple::freetuple(oldRecordBody); + dataTuple::freetuple(oldRecordBody); } - datatuple* tup = buildTuple(id, recordName, recordBody); + dataTuple* tup = buildTuple(id, recordName, recordBody); if(trace) { fprintf(trace, "Success = update(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return insert(tup); } @@ -471,37 +471,37 @@ remove(const std::string& databaseName, const std::string& recordName) if(trace) { fprintf(trace, "MapNotFound = remove(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::MapNotFound; } - datatuple* oldRecordBody = get(id, recordName); + dataTuple* oldRecordBody = get(id, recordName); if (oldRecordBody == NULL) { if(trace) { fprintf(trace, "RecordNotFound = remove(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return mapkeeper::ResponseCode::RecordNotFound; } - 
datatuple::freetuple(oldRecordBody); - datatuple* tup = buildTuple(id, recordName); + dataTuple::freetuple(oldRecordBody); + dataTuple* tup = buildTuple(id, recordName); if(trace) { fprintf(trace, "Success = remove(%s, %s)\n", databaseName.c_str(), recordName.c_str()); fflush(trace); } return insert(tup); } -datatuple* LSMServerHandler:: +dataTuple* LSMServerHandler:: buildTuple(uint32_t databaseId, const std::string& recordName) { return buildTuple(databaseId, recordName, NULL, DELETE); } -datatuple* LSMServerHandler:: +dataTuple* LSMServerHandler:: buildTuple(uint32_t databaseId, const std::string& recordName, const std::string& recordBody) { return buildTuple(databaseId, recordName, recordBody.c_str(), recordBody.size()); } -datatuple* LSMServerHandler:: +dataTuple* LSMServerHandler:: buildTuple(uint32_t databaseId, const std::string& recordName, const void* body, uint32_t bodySize) { uint32_t keySize = sizeof(databaseId) + recordName.size(); unsigned char* key = (unsigned char*)malloc(keySize); *(uint32_t*)key = htonl(databaseId); memcpy(((uint32_t*)key) + 1, recordName.c_str(), recordName.size()); - datatuple *tup = datatuple::create(key, keySize, body, bodySize); + dataTuple *tup = dataTuple::create(key, keySize, body, bodySize); free(key); return tup; } diff --git a/servers/mapkeeper/blsmRequestHandler.h b/servers/mapkeeper/blsmRequestHandler.h index 4e3d416..c59357b 100644 --- a/servers/mapkeeper/blsmRequestHandler.h +++ b/servers/mapkeeper/blsmRequestHandler.h @@ -48,16 +48,16 @@ public: short port; private: - ResponseCode::type insert(datatuple* tuple); + ResponseCode::type insert(dataTuple* tuple); uint32_t getDatabaseId(const std::string& databaseName); uint32_t nextDatabaseId(); - datatuple* get(uint32_t databaseId, const std::string& recordName); - datatuple* get(datatuple* tuple); - datatuple* buildTuple(uint32_t databaseId, const std::string& recordName); - datatuple* buildTuple(uint32_t databaseId, const std::string& recordName, const std::string& recordBody); - datatuple* buildTuple(uint32_t databaseId, const std::string& recordName, const void* body, uint32_t bodySize); + dataTuple* get(uint32_t databaseId, const std::string& recordName); + dataTuple* get(dataTuple* tuple); + dataTuple* buildTuple(uint32_t databaseId, const std::string& recordName); + dataTuple* buildTuple(uint32_t databaseId, const std::string& recordName, const std::string& recordBody); + dataTuple* buildTuple(uint32_t databaseId, const std::string& recordName, const void* body, uint32_t bodySize); void initNextDatabaseId(); - blsm* ltable_; + bLSM* ltable_; uint32_t nextDatabaseId_; pthread_mutex_t mutex_; }; diff --git a/servers/native/benchmarks/lsm_microbenchmarks.cpp b/servers/native/benchmarks/lsm_microbenchmarks.cpp index 076e249..4632eee 100644 --- a/servers/native/benchmarks/lsm_microbenchmarks.cpp +++ b/servers/native/benchmarks/lsm_microbenchmarks.cpp @@ -71,12 +71,12 @@ int main (int argc, char * argv[]) { printf("Hard limit=%lld\n", (long long)((stasis_dirty_page_count_hard_limit*PAGE_SIZE)/MB)); printf("Hard limit is %f pct.\n", 100.0 * ((double)stasis_dirty_page_count_hard_limit)/((double)stasis_buffer_manager_size)); - blsm::init_stasis(); + bLSM::init_stasis(); - RegionAllocator * readableAlloc = NULL; + regionAllocator * readableAlloc = NULL; if(!mode) { int xid = Tbegin(); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages); + regionAllocator * alloc = new regionAllocator(xid, num_pages); printf("Starting first write of %lld mb\n", (long long)mb); struct timeval start, 
start_sync, stop; double elapsed; gettimeofday(&start, 0); @@ -100,7 +100,7 @@ int main (int argc, char * argv[]) { if(!mode) { int xid = Tbegin(); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages); + regionAllocator * alloc = new regionAllocator(xid, num_pages); printf("Starting write with parallel read of %lld mb\n", (long long)mb); struct timeval start, start_sync, stop; double elapsed; gettimeofday(&start, 0); @@ -134,11 +134,11 @@ int main (int argc, char * argv[]) { struct timeval start, start_sync, stop; double elapsed; printf("Starting write of giant datapage\n"); gettimeofday(&start, 0); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages); - DataPage * dp = new DataPage(xid, num_pages-1, alloc); + regionAllocator * alloc = new regionAllocator(xid, num_pages); + dataPage * dp = new DataPage(xid, num_pages-1, alloc); byte * key = (byte*)calloc(100, 1); byte * val = (byte*)calloc(900, 1); - datatuple * tup = datatuple::create(key, 100, val, 900); + dataTuple * tup = dataTuple::create(key, 100, val, 900); free(key); free(val); while(1) { @@ -160,13 +160,13 @@ int main (int argc, char * argv[]) { struct timeval start, start_sync, stop; double elapsed; printf("Starting write of many small datapages\n"); gettimeofday(&start, 0); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages); + regionAllocator * alloc = new regionAllocator(xid, num_pages); byte * key = (byte*)calloc(100, 1); byte * val = (byte*)calloc(900, 1); - datatuple * tup = datatuple::create(key, 100, val, 900); + dataTuple * tup = dataTuple::create(key, 100, val, 900); free(key); free(val); - DataPage * dp = 0; + dataPage * dp = 0; uint64_t this_count = 0; uint64_t count = 0; uint64_t dp_count = 0; @@ -199,15 +199,15 @@ int main (int argc, char * argv[]) { struct timeval start, start_sync, stop; double elapsed; printf("Starting two parallel writes of many small datapages\n"); gettimeofday(&start, 0); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages/2); - RegionAllocator * alloc2 = new RegionAllocator(xid, num_pages/2); + regionAllocator * alloc = new regionAllocator(xid, num_pages/2); + regionAllocator * alloc2 = new regionAllocator(xid, num_pages/2); byte * key = (byte*)calloc(100, 1); byte * val = (byte*)calloc(900, 1); - datatuple * tup = datatuple::create(key, 100, val, 900); + dataTuple * tup = dataTuple::create(key, 100, val, 900); free(key); free(val); - DataPage * dp = 0; - DataPage * dp2 = 0; + dataPage * dp = 0; + dataPage * dp2 = 0; uint64_t this_count = 0; uint64_t count = 0; uint64_t dp_count = 0; @@ -241,29 +241,29 @@ int main (int argc, char * argv[]) { } - RegionAllocator * read_alloc = NULL; - RegionAllocator * read_alloc2 = NULL; - RegionAllocator * read_alloc3 = NULL; - RegionAllocator * read_alloc4 = NULL; + regionAllocator * read_alloc = NULL; + regionAllocator * read_alloc2 = NULL; + regionAllocator * read_alloc3 = NULL; + regionAllocator * read_alloc4 = NULL; if(!mode) { int xid = Tbegin(); struct timeval start, start_sync, stop; double elapsed; printf("Starting four parallel writes of many small datapages\n"); gettimeofday(&start, 0); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages/4); - RegionAllocator * alloc2 = new RegionAllocator(xid, num_pages/4); - RegionAllocator * alloc3 = new RegionAllocator(xid, num_pages/4); - RegionAllocator * alloc4 = new RegionAllocator(xid, num_pages/4); + regionAllocator * alloc = new regionAllocator(xid, num_pages/4); + regionAllocator * alloc2 = new regionAllocator(xid, num_pages/4); + regionAllocator 
* alloc3 = new regionAllocator(xid, num_pages/4); + regionAllocator * alloc4 = new regionAllocator(xid, num_pages/4); byte * key = (byte*)calloc(100, 1); byte * val = (byte*)calloc(900, 1); - datatuple * tup = datatuple::create(key, 100, val, 900); + dataTuple * tup = dataTuple::create(key, 100, val, 900); free(key); free(val); - DataPage * dp = 0; - DataPage * dp2 = 0; - DataPage * dp3 = 0; - DataPage * dp4 = 0; + dataPage * dp = 0; + dataPage * dp2 = 0; + dataPage * dp3 = 0; + dataPage * dp4 = 0; uint64_t this_count = 0; uint64_t count = 0; uint64_t dp_count = 0; @@ -317,19 +317,19 @@ int main (int argc, char * argv[]) { struct timeval start, start_sync, stop; double elapsed; printf("Starting four parallel writes of many small datapages\n"); gettimeofday(&start, 0); - RegionAllocator * alloc = new RegionAllocator(xid, num_pages/4); - RegionAllocator * alloc2 = new RegionAllocator(xid, num_pages/4); - RegionAllocator * alloc3 = new RegionAllocator(xid, num_pages/4); - RegionAllocator * alloc4 = new RegionAllocator(xid, num_pages/4); + regionAllocator * alloc = new regionAllocator(xid, num_pages/4); + regionAllocator * alloc2 = new regionAllocator(xid, num_pages/4); + regionAllocator * alloc3 = new regionAllocator(xid, num_pages/4); + regionAllocator * alloc4 = new regionAllocator(xid, num_pages/4); byte * key = (byte*)calloc(100, 1); byte * val = (byte*)calloc(900, 1); - datatuple * tup = datatuple::create(key, 100, val, 900); + dataTuple * tup = dataTuple::create(key, 100, val, 900); free(key); free(val); - DataPage * dp = 0; - DataPage * dp2 = 0; - DataPage * dp3 = 0; - DataPage * dp4 = 0; + dataPage * dp = 0; + dataPage * dp2 = 0; + dataPage * dp3 = 0; + dataPage * dp4 = 0; uint64_t this_count = 0; uint64_t count = 0; uint64_t dp_count = 0; @@ -348,15 +348,15 @@ int main (int argc, char * argv[]) { pageid_t i3 = regions3[0]; pageid_t i4 = regions4[0]; - DataPage * rdp = new DataPage(xid, 0, i1); - DataPage * rdp2 = new DataPage(xid, 0, i2); - DataPage * rdp3 = new DataPage(xid, 0, i3); - DataPage * rdp4 = new DataPage(xid, 0, i4); + dataPage * rdp = new DataPage(xid, 0, i1); + dataPage * rdp2 = new DataPage(xid, 0, i2); + dataPage * rdp3 = new DataPage(xid, 0, i3); + dataPage * rdp4 = new DataPage(xid, 0, i4); - DataPage::iterator it1 = rdp->begin(); - DataPage::iterator it2 = rdp2->begin(); - DataPage::iterator it3 = rdp3->begin(); - DataPage::iterator it4 = rdp4->begin(); + dataPage::iterator it1 = rdp->begin(); + dataPage::iterator it2 = rdp2->begin(); + dataPage::iterator it3 = rdp3->begin(); + dataPage::iterator it4 = rdp4->begin(); while((count * 1000) < (mb * 1024*1024)) { if((!dp) || !dp->append(tup)) { @@ -375,7 +375,7 @@ int main (int argc, char * argv[]) { dp4 = new DataPage(xid, 2, alloc4); //dp_count++; } - datatuple * t; + dataTuple * t; if((!rdp) || !(t = it1.getnext())) { i1+= rdp->get_page_count(); if(rdp) delete rdp; @@ -384,7 +384,7 @@ int main (int argc, char * argv[]) { it1 = rdp->begin(); t = it1.getnext(); } - if(t) datatuple::freetuple(t); + if(t) dataTuple::freetuple(t); if((!rdp2) || !(t = it2.getnext())) { i2+= rdp2->get_page_count(); if(rdp2) delete rdp2; @@ -393,7 +393,7 @@ int main (int argc, char * argv[]) { it2 = rdp2->begin(); t = it2.getnext(); } - if(t) datatuple::freetuple(t); + if(t) dataTuple::freetuple(t); if((!rdp3) || !(t = it3.getnext())) { i3+= rdp3->get_page_count(); if(rdp3) delete rdp3; @@ -402,7 +402,7 @@ int main (int argc, char * argv[]) { it3 = rdp3->begin(); t = it3.getnext(); } - if(t) datatuple::freetuple(t); + if(t) 
dataTuple::freetuple(t); if((!rdp4) || !(t = it4.getnext())) { i4+= rdp4->get_page_count(); if(rdp4) delete rdp4; @@ -411,7 +411,7 @@ int main (int argc, char * argv[]) { it4 = rdp4->begin(); t = it4.getnext(); } - if(t) datatuple::freetuple(t); + if(t) dataTuple::freetuple(t); count += 8; this_count++; @@ -441,5 +441,5 @@ int main (int argc, char * argv[]) { } - blsm::deinit_stasis(); + bLSM::deinit_stasis(); } diff --git a/servers/native/benchmarks/tcpclient_noop.cpp b/servers/native/benchmarks/tcpclient_noop.cpp index 8a33bb5..d7429cb 100644 --- a/servers/native/benchmarks/tcpclient_noop.cpp +++ b/servers/native/benchmarks/tcpclient_noop.cpp @@ -38,11 +38,11 @@ char ** thrargv; void * worker (void * arg) { logstore_handle_t * l = util_open_conn(thrargc-2, thrargv+2); for(int i = 0; i < threadopcount; i++) { - datatuple * ret = logstore_client_op(l, OP_DBG_NOOP); + dataTuple * ret = logstore_client_op(l, OP_DBG_NOOP); if(ret == NULL) { perror("No-op failed"); return (void*)-1; } else { - datatuple::freetuple(ret); + dataTuple::freetuple(ret); } } logstore_client_close(l); diff --git a/servers/native/logserver.cpp b/servers/native/logserver.cpp index dfeb74c..e4b6874 100644 --- a/servers/native/logserver.cpp +++ b/servers/native/logserver.cpp @@ -37,7 +37,7 @@ #include void *serverLoop(void *args); -void logserver::startserver(blsm *ltable) +void logserver::startserver(bLSM *ltable) { sys_alive = true; this->ltable = ltable; @@ -474,7 +474,7 @@ void * thread_work_fn( void * args) int err = opcode == OP_DONE || opiserror(opcode); //close the conn on failure //step 2: read the first tuple from client - datatuple *tuple = 0, *tuple2 = 0; + dataTuple *tuple = 0, *tuple2 = 0; if(!err) { tuple = readtuplefromsocket(*(item->data->workitem), &err); } // read the second tuple from client if(!err) { tuple2 = readtuplefromsocket(*(item->data->workitem), &err); } @@ -483,8 +483,8 @@ void * thread_work_fn( void * args) if(!err) { err = requestDispatch::dispatch_request(opcode, tuple, tuple2, item->data->ltable, *(item->data->workitem)); } //free the tuple - if(tuple) datatuple::freetuple(tuple); - if(tuple2) datatuple::freetuple(tuple2); + if(tuple) dataTuple::freetuple(tuple); + if(tuple2) dataTuple::freetuple(tuple2); pthread_mutex_lock(item->data->qlock); diff --git a/servers/native/logserver.h b/servers/native/logserver.h index 5d67c08..f8398df 100644 --- a/servers/native/logserver.h +++ b/servers/native/logserver.h @@ -53,7 +53,7 @@ struct pthread_data { int *workitem; //id of the socket to work - blsm *ltable; + bLSM *ltable; bool *sys_alive; #ifdef STATS_ENABLED @@ -111,7 +111,7 @@ public: delete qlock; } - void startserver(blsm *ltable); + void startserver(bLSM *ltable); void stopserver(); @@ -140,7 +140,7 @@ private: int * self_pipe; // write a byte to self_pipe[1] to wake up select(). std::vector th_list; // list of threads - blsm *ltable; + bLSM *ltable; #ifdef STATS_ENABLED int num_reqs; diff --git a/servers/native/network.h b/servers/native/network.h index 936dffb..798a485 100644 --- a/servers/native/network.h +++ b/servers/native/network.h @@ -278,7 +278,7 @@ static inline int writeoptosocket(int sockd, network_op_t op) { */ -static inline datatuple* readtuplefromsocket(FILE * sockf, int * err) { +static inline dataTuple* readtuplefromsocket(FILE * sockf, int * err) { len_t keylen, datalen, buflen; @@ -286,12 +286,12 @@ static inline datatuple* readtuplefromsocket(FILE * sockf, int * err) { if(keylen == DELETE) return NULL; // *err is zero. 
if(( *err = readfromsocket(sockf, &datalen, sizeof(datalen)) )) return NULL; - buflen = datatuple::length_from_header(keylen, datalen); + buflen = dataTuple::length_from_header(keylen, datalen); byte* bytes = (byte*) malloc(buflen); if(( *err = readfromsocket(sockf, bytes, buflen) )) { free(bytes); return NULL; } - datatuple * ret = datatuple::from_bytes(keylen, datalen, bytes); + dataTuple * ret = dataTuple::from_bytes(keylen, datalen, bytes); free(bytes); return ret; } @@ -301,7 +301,7 @@ static inline datatuple* readtuplefromsocket(FILE * sockf, int * err) { @param error will be set to zero on succes, a logstore error number on failure @return a datatuple, or NULL. */ -static inline datatuple* readtuplefromsocket(int sockd, int * err) { +static inline dataTuple* readtuplefromsocket(int sockd, int * err) { len_t keylen, datalen, buflen; @@ -309,7 +309,7 @@ static inline datatuple* readtuplefromsocket(int sockd, int * err) { if(keylen == DELETE) return NULL; // *err is zero. if(( *err = readfromsocket(sockd, &datalen, sizeof(datalen)) )) return NULL; - buflen = datatuple::length_from_header(keylen, datalen); + buflen = dataTuple::length_from_header(keylen, datalen); // TODO remove the malloc / free in readtuplefromsocket, either with a // two-stage API for datatuple::create, or with realloc. @@ -317,7 +317,7 @@ static inline datatuple* readtuplefromsocket(int sockd, int * err) { if(( *err = readfromsocket(sockd, bytes, buflen) )) return NULL; - datatuple * ret = datatuple::from_bytes(keylen, datalen, bytes); + dataTuple * ret = dataTuple::from_bytes(keylen, datalen, bytes); free(bytes); return ret; } @@ -329,7 +329,7 @@ static inline int writeendofiteratortosocket(FILE * sockf) { static inline int writeendofiteratortosocket(int sockd) { return writetosocket(sockd, &DELETE, sizeof(DELETE)); } -static inline int writetupletosocket(FILE * sockf, const datatuple *tup) { +static inline int writetupletosocket(FILE * sockf, const dataTuple *tup) { len_t keylen, datalen; int err; @@ -339,11 +339,11 @@ static inline int writetupletosocket(FILE * sockf, const datatuple *tup) { const byte* buf = tup->get_bytes(&keylen, &datalen); if(( err = writetosocket(sockf, &keylen, sizeof(keylen)) )) return err; if(( err = writetosocket(sockf, &datalen, sizeof(datalen)) )) return err; - if(( err = writetosocket(sockf, buf, datatuple::length_from_header(keylen, datalen)) )) return err; + if(( err = writetosocket(sockf, buf, dataTuple::length_from_header(keylen, datalen)) )) return err; } return 0; } -static inline int writetupletosocket(int sockd, const datatuple* tup) { +static inline int writetupletosocket(int sockd, const dataTuple* tup) { len_t keylen, datalen; int err; @@ -353,7 +353,7 @@ static inline int writetupletosocket(int sockd, const datatuple* tup) { const byte* buf = tup->get_bytes(&keylen, &datalen); if(( err = writetosocket(sockd, &keylen, sizeof(keylen)) )) return err; if(( err = writetosocket(sockd, &datalen, sizeof(datalen)) )) return err; - if(( err = writetosocket(sockd, buf, datatuple::length_from_header(keylen, datalen)) )) return err; + if(( err = writetosocket(sockd, buf, dataTuple::length_from_header(keylen, datalen)) )) return err; } return 0; diff --git a/servers/native/newserver.cpp b/servers/native/newserver.cpp index d728f73..9aeec24 100644 --- a/servers/native/newserver.cpp +++ b/servers/native/newserver.cpp @@ -61,14 +61,14 @@ int main(int argc, char *argv[]) } } - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); recordid table_root = ROOT_RECORD; { - blsm 
ltable(log_mode, c0_size); + bLSM ltable(log_mode, c0_size); ltable.expiry = expiry_delta; if(TrecordType(xid, ROOT_RECORD) == INVALID_SLOT) { @@ -83,7 +83,7 @@ int main(int argc, char *argv[]) } Tcommit(xid); - merge_scheduler * mscheduler = new merge_scheduler(<able); + mergeScheduler * mscheduler = new mergeScheduler(<able); mscheduler->start(); ltable.replayLog(); @@ -101,7 +101,7 @@ int main(int argc, char *argv[]) printf("Deinitializing stasis...\n"); fflush(stdout); } - blsm::deinit_stasis(); + bLSM::deinit_stasis(); printf("Shutdown complete\n"); } diff --git a/servers/native/requestDispatch.cpp b/servers/native/requestDispatch.cpp index 2d2c1aa..510fd2b 100644 --- a/servers/native/requestDispatch.cpp +++ b/servers/native/requestDispatch.cpp @@ -22,23 +22,23 @@ #include "regionAllocator.h" template -inline int requestDispatch::op_insert(blsm * ltable, HANDLE fd, datatuple * tuple) { +inline int requestDispatch::op_insert(bLSM * ltable, HANDLE fd, dataTuple * tuple) { //insert/update/delete ltable->insertTuple(tuple); //step 4: send response return writeoptosocket(fd, LOGSTORE_RESPONSE_SUCCESS); } template -inline int requestDispatch::op_test_and_set(blsm * ltable, HANDLE fd, datatuple * tuple, datatuple * tuple2) { +inline int requestDispatch::op_test_and_set(bLSM * ltable, HANDLE fd, dataTuple * tuple, dataTuple * tuple2) { //insert/update/delete bool succ = ltable->testAndSetTuple(tuple, tuple2); //step 4: send response return writeoptosocket(fd, succ ? LOGSTORE_RESPONSE_SUCCESS : LOGSTORE_RESPONSE_FAIL); } template -inline int requestDispatch::op_bulk_insert(blsm *ltable, HANDLE fd) { +inline int requestDispatch::op_bulk_insert(bLSM *ltable, HANDLE fd) { int err = writeoptosocket(fd, LOGSTORE_RESPONSE_RECEIVING_TUPLES); - datatuple ** tups = (datatuple **) malloc(sizeof(tups[0]) * 100); + dataTuple ** tups = (dataTuple **) malloc(sizeof(tups[0]) * 100); int tups_size = 100; int cur_tup_count = 0; while((tups[cur_tup_count] = readtuplefromsocket(fd, &err))) { @@ -46,34 +46,34 @@ inline int requestDispatch::op_bulk_insert(blsm *ltable, HANDLE fd) { if(cur_tup_count == tups_size) { ltable->insertManyTuples(tups, cur_tup_count); for(int i = 0; i < cur_tup_count; i++) { - datatuple::freetuple(tups[i]); + dataTuple::freetuple(tups[i]); } cur_tup_count = 0; } } ltable->insertManyTuples(tups, cur_tup_count); for(int i = 0; i < cur_tup_count; i++) { - datatuple::freetuple(tups[i]); + dataTuple::freetuple(tups[i]); } free(tups); if(!err) err = writeoptosocket(fd, LOGSTORE_RESPONSE_SUCCESS); return err; } template -inline int requestDispatch::op_find(blsm * ltable, HANDLE fd, datatuple * tuple) { +inline int requestDispatch::op_find(bLSM * ltable, HANDLE fd, dataTuple * tuple) { //find the tuple - datatuple *dt = ltable->findTuple_first(-1, tuple->strippedkey(), tuple->strippedkeylen()); + dataTuple *dt = ltable->findTuple_first(-1, tuple->strippedkey(), tuple->strippedkeylen()); #ifdef STATS_ENABLED if(dt == 0) { - DEBUG("key not found:\t%s\n", datatuple::key_to_str(tuple.key()).c_str()); + DEBUG("key not found:\t%s\n", dataTuple::key_to_str(tuple.key()).c_str()); } else if( dt->datalen() != 1024) { - DEBUG("data len for\t%s:\t%d\n", datatuple::key_to_str(tuple.key()).c_str(), + DEBUG("data len for\t%s:\t%d\n", dataTuple::key_to_str(tuple.key()).c_str(), dt->datalen); - if(datatuple::compare(tuple->key(), tuple->keylen(), dt->key(), dt->keylen()) != 0) { - DEBUG("key not equal:\t%s\t%s\n", datatuple::key_to_str(tuple.key()).c_str(), - datatuple::key_to_str(dt->key).c_str()); + 
if(dataTuple::compare(tuple->key(), tuple->keylen(), dt->key(), dt->keylen()) != 0) { + DEBUG("key not equal:\t%s\t%s\n", dataTuple::key_to_str(tuple.key()).c_str(), + dataTuple::key_to_str(dt->key).c_str()); } } @@ -100,27 +100,27 @@ inline int requestDispatch::op_find(blsm * ltable, HANDLE fd, datatuple } //free datatuple if(dt_needs_free) { - datatuple::freetuple(dt); + dataTuple::freetuple(dt); } return err; } template -inline int requestDispatch::op_scan(blsm * ltable, HANDLE fd, datatuple * tuple, datatuple * tuple2, size_t limit) { +inline int requestDispatch::op_scan(bLSM * ltable, HANDLE fd, dataTuple * tuple, dataTuple * tuple2, size_t limit) { size_t count = 0; int err = writeoptosocket(fd, LOGSTORE_RESPONSE_SENDING_TUPLES); if(!err) { - blsm::iterator * itr = new blsm::iterator(ltable, tuple); - datatuple * t; + bLSM::iterator * itr = new bLSM::iterator(ltable, tuple); + dataTuple * t; while(!err && (t = itr->getnext())) { if(tuple2) { // are we at the end of range? - if(datatuple::compare_obj(t, tuple2) >= 0) { - datatuple::freetuple(t); + if(dataTuple::compare_obj(t, tuple2) >= 0) { + dataTuple::freetuple(t); break; } } err = writetupletosocket(fd, t); - datatuple::freetuple(t); + dataTuple::freetuple(t); count ++; if(count == limit) { break; } // did we hit limit? } @@ -130,17 +130,17 @@ inline int requestDispatch::op_scan(blsm * ltable, HANDLE fd, datatuple return err; } template -inline int requestDispatch::op_flush(blsm * ltable, HANDLE fd) { +inline int requestDispatch::op_flush(bLSM * ltable, HANDLE fd) { ltable->flushTable(); return writeoptosocket(fd, LOGSTORE_RESPONSE_SUCCESS); } template -inline int requestDispatch::op_shutdown(blsm * ltable, HANDLE fd) { +inline int requestDispatch::op_shutdown(bLSM * ltable, HANDLE fd) { ltable->accepting_new_requests = false; return writeoptosocket(fd, LOGSTORE_RESPONSE_SUCCESS); } template -inline int requestDispatch::op_stat_space_usage(blsm * ltable, HANDLE fd) { +inline int requestDispatch::op_stat_space_usage(bLSM * ltable, HANDLE fd) { int xid = Tbegin(); @@ -223,7 +223,7 @@ inline int requestDispatch::op_stat_space_usage(blsm * ltable, HANDLE fd Tcommit(xid); uint64_t filesize = max_off * PAGE_SIZE; - datatuple *tup = datatuple::create(&treesize, sizeof(treesize), &filesize, sizeof(filesize)); + dataTuple *tup = dataTuple::create(&treesize, sizeof(treesize), &filesize, sizeof(filesize)); DEBUG("tree size: %lld, filesize %lld\n", treesize, filesize); @@ -233,25 +233,25 @@ inline int requestDispatch::op_stat_space_usage(blsm * ltable, HANDLE fd if(!err){ err = writeendofiteratortosocket(fd); } - datatuple::freetuple(tup); + dataTuple::freetuple(tup); return err; } template -inline int requestDispatch::op_stat_perf_report(blsm * ltable, HANDLE fd) { +inline int requestDispatch::op_stat_perf_report(bLSM * ltable, HANDLE fd) { } template -inline int requestDispatch::op_stat_histogram(blsm * ltable, HANDLE fd, size_t limit) { +inline int requestDispatch::op_stat_histogram(bLSM * ltable, HANDLE fd, size_t limit) { if(limit < 3) { return writeoptosocket(fd, LOGSTORE_PROTOCOL_ERROR); } int xid = Tbegin(); - RegionAllocator * ro_alloc = new RegionAllocator(); + regionAllocator * ro_alloc = new regionAllocator(); diskTreeComponent::internalNodes::iterator * it = new diskTreeComponent::internalNodes::iterator(xid, ro_alloc, ltable->get_tree_c2()->get_root_rid()); size_t count = 0; int err = 0; @@ -269,12 +269,12 @@ inline int requestDispatch::op_stat_histogram(blsm * ltable, HANDLE fd, stride = 1; } - datatuple * tup = 
datatuple::create(&stride, sizeof(stride)); + dataTuple * tup = dataTuple::create(&stride, sizeof(stride)); if(!err) { err = writeoptosocket(fd, LOGSTORE_RESPONSE_SENDING_TUPLES); } if(!err) { err = writetupletosocket(fd, tup); } - datatuple::freetuple(tup); + dataTuple::freetuple(tup); size_t cur_stride = 0; size_t i = 0; @@ -284,11 +284,11 @@ inline int requestDispatch::op_stat_histogram(blsm * ltable, HANDLE fd, if(i == count || !cur_stride) { // do we want to send this key? (this matches the first, last and interior keys) byte * key; size_t keylen= it->key(&key); - tup = datatuple::create(key, keylen); + tup = dataTuple::create(key, keylen); if(!err) { err = writetupletosocket(fd, tup); } - datatuple::freetuple(tup); + dataTuple::freetuple(tup); cur_stride = stride; } cur_stride--; @@ -302,7 +302,7 @@ inline int requestDispatch::op_stat_histogram(blsm * ltable, HANDLE fd, return err; } template -inline int requestDispatch::op_dbg_blockmap(blsm * ltable, HANDLE fd) { +inline int requestDispatch::op_dbg_blockmap(bLSM * ltable, HANDLE fd) { // produce a list of stasis regions int xid = Tbegin(); @@ -406,9 +406,9 @@ inline int requestDispatch::op_dbg_blockmap(blsm * ltable, HANDLE fd) { } template -inline int requestDispatch::op_dbg_drop_database(blsm * ltable, HANDLE fd) { - blsm::iterator * itr = new blsm::iterator(ltable); - datatuple * del; +inline int requestDispatch::op_dbg_drop_database(bLSM * ltable, HANDLE fd) { + bLSM::iterator * itr = new bLSM::iterator(ltable); + dataTuple * del; fprintf(stderr, "DROPPING DATABASE...\n"); long long n = 0; while((del = itr->getnext())) { @@ -425,18 +425,18 @@ inline int requestDispatch::op_dbg_drop_database(blsm * ltable, HANDLE f printf("? %lld %s\n", n, (char*)del->rawkey()); fflush(stdout); } } - datatuple::freetuple(del); + dataTuple::freetuple(del); } delete itr; fprintf(stderr, "...DROP DATABASE COMPLETE\n"); return writeoptosocket(fd, LOGSTORE_RESPONSE_SUCCESS); } template -inline int requestDispatch::op_dbg_noop(blsm * ltable, HANDLE fd) { +inline int requestDispatch::op_dbg_noop(bLSM * ltable, HANDLE fd) { return writeoptosocket(fd, LOGSTORE_RESPONSE_SUCCESS); } template -inline int requestDispatch::op_dbg_set_log_mode(blsm * ltable, HANDLE fd, datatuple * tuple) { +inline int requestDispatch::op_dbg_set_log_mode(bLSM * ltable, HANDLE fd, dataTuple * tuple) { if(tuple->rawkeylen() != sizeof(int)) { abort(); return writeoptosocket(fd, LOGSTORE_PROTOCOL_ERROR); @@ -448,7 +448,7 @@ inline int requestDispatch::op_dbg_set_log_mode(blsm * ltable, HANDLE fd } } template -int requestDispatch::dispatch_request(HANDLE f, blsm *ltable) { +int requestDispatch::dispatch_request(HANDLE f, bLSM *ltable) { //step 1: read the opcode network_op_t opcode = readopfromsocket(f, LOGSTORE_CLIENT_REQUEST); if(opcode == LOGSTORE_CONN_CLOSED_ERROR) { @@ -459,7 +459,7 @@ int requestDispatch::dispatch_request(HANDLE f, blsm *ltable) { int err = opcode == OP_DONE || opiserror(opcode); //close the conn on failure //step 2: read the first tuple from client - datatuple *tuple = 0, *tuple2 = 0; + dataTuple *tuple = 0, *tuple2 = 0; if(!err) { tuple = readtuplefromsocket(f, &err); } // read the second tuple from client if(!err) { tuple2 = readtuplefromsocket(f, &err); } @@ -468,8 +468,8 @@ int requestDispatch::dispatch_request(HANDLE f, blsm *ltable) { if(!err) { err = dispatch_request(opcode, tuple, tuple2, ltable, f); } //free the tuple - if(tuple) datatuple::freetuple(tuple); - if(tuple2) datatuple::freetuple(tuple2); + if(tuple) dataTuple::freetuple(tuple); + 
if(tuple2) dataTuple::freetuple(tuple2); // Deal with old work_queue item by freeing it or putting it back in the queue. @@ -485,7 +485,7 @@ int requestDispatch::dispatch_request(HANDLE f, blsm *ltable) { } template -int requestDispatch::dispatch_request(network_op_t opcode, datatuple * tuple, datatuple * tuple2, blsm * ltable, HANDLE fd) { +int requestDispatch::dispatch_request(network_op_t opcode, dataTuple * tuple, dataTuple * tuple2, bLSM * ltable, HANDLE fd) { int err = 0; #if 0 if(tuple) { diff --git a/servers/native/requestDispatch.h b/servers/native/requestDispatch.h index c36b528..0318f3d 100644 --- a/servers/native/requestDispatch.h +++ b/servers/native/requestDispatch.h @@ -13,23 +13,23 @@ template class requestDispatch { private: - static inline int op_insert(blsm * ltable, HANDLE fd, datatuple * tuple); - static inline int op_test_and_set(blsm * ltable, HANDLE fd, datatuple * tuple, datatuple * tuple2); - static inline int op_find(blsm * ltable, HANDLE fd, datatuple * tuple); - static inline int op_scan(blsm * ltable, HANDLE fd, datatuple * tuple, datatuple * tuple2, size_t limit); - static inline int op_bulk_insert(blsm * ltable, HANDLE fd); - static inline int op_flush(blsm * ltable, HANDLE fd); - static inline int op_shutdown(blsm * ltable, HANDLE fd); - static inline int op_stat_space_usage(blsm * ltable, HANDLE fd); - static inline int op_stat_perf_report(blsm * ltable, HANDLE fd); - static inline int op_stat_histogram(blsm * ltable, HANDLE fd, size_t limit); - static inline int op_dbg_blockmap(blsm * ltable, HANDLE fd); - static inline int op_dbg_drop_database(blsm * ltable, HANDLE fd); - static inline int op_dbg_noop(blsm * ltable, HANDLE fd); - static inline int op_dbg_set_log_mode(blsm * ltable, HANDLE fd, datatuple * tuple); + static inline int op_insert(bLSM * ltable, HANDLE fd, dataTuple * tuple); + static inline int op_test_and_set(bLSM * ltable, HANDLE fd, dataTuple * tuple, dataTuple * tuple2); + static inline int op_find(bLSM * ltable, HANDLE fd, dataTuple * tuple); + static inline int op_scan(bLSM * ltable, HANDLE fd, dataTuple * tuple, dataTuple * tuple2, size_t limit); + static inline int op_bulk_insert(bLSM * ltable, HANDLE fd); + static inline int op_flush(bLSM * ltable, HANDLE fd); + static inline int op_shutdown(bLSM * ltable, HANDLE fd); + static inline int op_stat_space_usage(bLSM * ltable, HANDLE fd); + static inline int op_stat_perf_report(bLSM * ltable, HANDLE fd); + static inline int op_stat_histogram(bLSM * ltable, HANDLE fd, size_t limit); + static inline int op_dbg_blockmap(bLSM * ltable, HANDLE fd); + static inline int op_dbg_drop_database(bLSM * ltable, HANDLE fd); + static inline int op_dbg_noop(bLSM * ltable, HANDLE fd); + static inline int op_dbg_set_log_mode(bLSM * ltable, HANDLE fd, dataTuple * tuple); public: - static int dispatch_request(HANDLE f, blsm * ltable); - static int dispatch_request(network_op_t opcode, datatuple * tuple, datatuple * tuple2, blsm * ltable, HANDLE fd); + static int dispatch_request(HANDLE f, bLSM * ltable); + static int dispatch_request(network_op_t opcode, dataTuple * tuple, dataTuple * tuple2, bLSM * ltable, HANDLE fd); }; #endif /* REQUESTDISPATCH_H_ */ diff --git a/servers/native/server.cpp b/servers/native/server.cpp index 1b84823..c0c7fd6 100644 --- a/servers/native/server.cpp +++ b/servers/native/server.cpp @@ -35,7 +35,7 @@ #include logserver *lserver=0; -merge_scheduler *mscheduler=0; +mergeScheduler *mscheduler=0; /*void ignore_pipe(int param) { @@ -53,7 +53,7 @@ void terminate (int param) 
printf("Deinitializing stasis...\n"); fflush(stdout); - blsm::deinit_stasis(); + bLSM::deinit_stasis(); exit(0); } @@ -66,7 +66,7 @@ int main(int argc, char *argv[]) prev_fn = signal (SIGINT,terminate); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); @@ -84,7 +84,7 @@ int main(int argc, char *argv[]) printf("note: running w/ 2GB c0 for benchmarking"); // XXX build a separate test server and deployment server? } - blsm ltable(c0_size); + bLSM ltable(c0_size); recordid table_root = ROOT_RECORD; if(TrecordType(xid, ROOT_RECORD) == INVALID_SLOT) { @@ -100,7 +100,7 @@ int main(int argc, char *argv[]) Tcommit(xid); - mscheduler = new merge_scheduler(<able); + mscheduler = new mergeScheduler(<able); mscheduler->start(); lserver = new logserver(100, 32432); diff --git a/servers/native/simpleServer.cpp b/servers/native/simpleServer.cpp index a534e0e..93f6d55 100644 --- a/servers/native/simpleServer.cpp +++ b/servers/native/simpleServer.cpp @@ -67,7 +67,7 @@ void * simpleServer::worker(int self) { } } -simpleServer::simpleServer(blsm * ltable, int max_threads, int port): +simpleServer::simpleServer(bLSM * ltable, int max_threads, int port): ltable(ltable), port(port), max_threads(max_threads), diff --git a/servers/native/simpleServer.h b/servers/native/simpleServer.h index aee8438..b812002 100644 --- a/servers/native/simpleServer.h +++ b/servers/native/simpleServer.h @@ -29,11 +29,11 @@ public: static const int DEFAULT_PORT = 32432; static const int DEFAULT_THREADS = 1000; - simpleServer(blsm * ltable, int max_threads = DEFAULT_THREADS, int port = DEFAULT_PORT); + simpleServer(bLSM * ltable, int max_threads = DEFAULT_THREADS, int port = DEFAULT_PORT); bool acceptLoop(); ~simpleServer(); private: - blsm* ltable; + bLSM* ltable; int port; int max_threads; int * thread_fd; diff --git a/servers/native/tcpclient.cpp b/servers/native/tcpclient.cpp index fd686ff..6c825d6 100644 --- a/servers/native/tcpclient.cpp +++ b/servers/native/tcpclient.cpp @@ -83,7 +83,7 @@ static inline void close_conn(logstore_handle_t *l) { uint8_t logstore_client_op_returns_many(logstore_handle_t *l, - uint8_t opcode, datatuple * tuple, datatuple * tuple2, uint64_t count) { + uint8_t opcode, dataTuple * tuple, dataTuple * tuple2, uint64_t count) { if(l->server_socket < 0) { @@ -144,7 +144,7 @@ logstore_client_op_returns_many(logstore_handle_t *l, } network_op_t -logstore_client_send_tuple(logstore_handle_t *l, datatuple *t) { +logstore_client_send_tuple(logstore_handle_t *l, dataTuple *t) { assert(l->server_fsocket != 0); network_op_t rcode = LOGSTORE_RESPONSE_SUCCESS; int err; @@ -163,45 +163,45 @@ logstore_client_send_tuple(logstore_handle_t *l, datatuple *t) { return rcode; } -datatuple * +dataTuple * logstore_client_next_tuple(logstore_handle_t *l) { assert(l->server_fsocket != 0); // otherwise, then the client forgot to check a return value... 
   int err = 0;
-  datatuple * ret = readtuplefromsocket(l->server_fsocket, &err);
+  dataTuple * ret = readtuplefromsocket(l->server_fsocket, &err);
   if(err) {
     close_conn(l);
     if(ret) {
-      datatuple::freetuple(ret);
+      dataTuple::freetuple(ret);
       ret = NULL;
     }
   }
   return ret;
 }
 
-datatuple *
+dataTuple *
 logstore_client_op(logstore_handle_t *l,
-    uint8_t opcode, datatuple * tuple, datatuple * tuple2, uint64_t count)
+    uint8_t opcode, dataTuple * tuple, dataTuple * tuple2, uint64_t count)
 {
   network_op_t rcode = logstore_client_op_returns_many(l, opcode, tuple, tuple2, count);
   if(opiserror(rcode)) { return NULL; }
-  datatuple * ret = NULL;
+  dataTuple * ret = NULL;
   if(rcode == LOGSTORE_RESPONSE_SENDING_TUPLES) {
     ret = logstore_client_next_tuple(l);
     if(ret) {
-      datatuple *nxt = logstore_client_next_tuple(l);
+      dataTuple *nxt = logstore_client_next_tuple(l);
       if(nxt) {
         fprintf(stderr, "Opcode %d returned multiple tuples, but caller expects zero or one.  Closing connection.\n", (int)opcode);
-        datatuple::freetuple(nxt);
-        datatuple::freetuple(ret);
+        dataTuple::freetuple(nxt);
+        dataTuple::freetuple(ret);
         close_conn(l);
         ret = 0;
       }
     }
   } else if(rcode == LOGSTORE_RESPONSE_SUCCESS) {
-    ret = tuple ? tuple : datatuple::create("", 1);
+    ret = tuple ? tuple : dataTuple::create("", 1);
   } else {
     assert(rcode == LOGSTORE_RESPONSE_FAIL); // if this is an invalid response, we should have noticed above
     ret = 0;
diff --git a/servers/native/tcpclient.h b/servers/native/tcpclient.h
index ba2265d..1aea644 100644
--- a/servers/native/tcpclient.h
+++ b/servers/native/tcpclient.h
@@ -28,18 +28,18 @@ typedef struct logstore_handle_t logstore_handle_t;
 
 logstore_handle_t * logstore_client_open(const char *host, int portnum, int timeout);
 
-datatuple * logstore_client_op(logstore_handle_t* l,
+dataTuple * logstore_client_op(logstore_handle_t* l,
     uint8_t opcode,
-    datatuple *tuple = NULL, datatuple *tuple2 = NULL,
+    dataTuple *tuple = NULL, dataTuple *tuple2 = NULL,
     uint64_t count = (uint64_t)-1);
 
 uint8_t logstore_client_op_returns_many(logstore_handle_t *l,
     uint8_t opcode,
-    datatuple * tuple = NULL, datatuple * tuple2 = NULL,
+    dataTuple * tuple = NULL, dataTuple * tuple2 = NULL,
     uint64_t count = (uint64_t)-1);
 
-datatuple * logstore_client_next_tuple(logstore_handle_t *l);
-uint8_t logstore_client_send_tuple(logstore_handle_t *l, datatuple *tuple = NULL);
+dataTuple * logstore_client_next_tuple(logstore_handle_t *l);
+uint8_t logstore_client_send_tuple(logstore_handle_t *l, dataTuple *tuple = NULL);
 
 int logstore_client_close(logstore_handle_t* l);
diff --git a/servers/native/util/change_log_mode.cpp b/servers/native/util/change_log_mode.cpp
index 056310c..b9aae65 100644
--- a/servers/native/util/change_log_mode.cpp
+++ b/servers/native/util/change_log_mode.cpp
@@ -44,14 +44,14 @@ int main(int argc, char * argv[]) {
 
   logstore_handle_t * l = util_open_conn(argc, argv);
 
-  datatuple * tup = datatuple::create(&mode, sizeof(mode));
+  dataTuple * tup = dataTuple::create(&mode, sizeof(mode));
 
-  datatuple * ret = logstore_client_op(l, OP_DBG_SET_LOG_MODE, tup);
+  dataTuple * ret = logstore_client_op(l, OP_DBG_SET_LOG_MODE, tup);
 
   if(ret == NULL) {
     perror("Changing log mode failed.."); return 3;
   } else {
-    datatuple::freetuple(ret);
+    dataTuple::freetuple(ret);
   }
   logstore_client_close(l);
   printf("Log mode changed.\n");
diff --git a/servers/native/util/copy_database.cpp b/servers/native/util/copy_database.cpp
index a84b08f..89668ed 100644
--- a/servers/native/util/copy_database.cpp
+++ b/servers/native/util/copy_database.cpp
@@ -53,7 +53,7 @@
 int main(int argc, char * argv[]) {
   }
   long long num_tuples = 0;
   long long size_copied = 0;
-  datatuple *tup;
+  dataTuple *tup;
   int bytes_per_dot = 10 * 1024 * 1024;
   int dots_per_line = 50;
@@ -66,7 +66,7 @@ int main(int argc, char * argv[]) {
     ret = logstore_client_send_tuple(to, tup);
     num_tuples ++;
     size_copied += tup->byte_length();
-    datatuple::freetuple(tup);
+    dataTuple::freetuple(tup);
     if(ret != LOGSTORE_RESPONSE_SUCCESS) {
       perror("Send tuple failed"); return 3;
     }
diff --git a/servers/native/util/drop_database.cpp b/servers/native/util/drop_database.cpp
index bc9fadb..618452d 100644
--- a/servers/native/util/drop_database.cpp
+++ b/servers/native/util/drop_database.cpp
@@ -15,11 +15,11 @@ void usage(char * argv[]) {
 int main(int argc, char * argv[]) {
   logstore_handle_t * l = util_open_conn(argc, argv);
 
-  datatuple * ret = logstore_client_op(l, OP_DBG_DROP_DATABASE);
+  dataTuple * ret = logstore_client_op(l, OP_DBG_DROP_DATABASE);
 
   if(ret == NULL) {
     perror("Drop database failed"); return 3;
   } else {
-    datatuple::freetuple(ret);
+    dataTuple::freetuple(ret);
   }
   logstore_client_close(l);
   printf("Drop database succeeded\n");
diff --git a/servers/native/util/dump_blockmap.cpp b/servers/native/util/dump_blockmap.cpp
index fc5eda0..a9e8edf 100644
--- a/servers/native/util/dump_blockmap.cpp
+++ b/servers/native/util/dump_blockmap.cpp
@@ -31,11 +31,11 @@ int main(int argc, char * argv[]) {
   int op = OP_DBG_BLOCKMAP;
   logstore_handle_t * l = util_open_conn(argc, argv);
 
-  datatuple * ret = logstore_client_op(l, op);
+  dataTuple * ret = logstore_client_op(l, op);
 
   if(ret == NULL) {
     perror("Dump blockmap failed."); return 3;
   } else {
-    datatuple::freetuple(ret);
+    dataTuple::freetuple(ret);
   }
   logstore_client_close(l);
   printf("Dump blockmap succeeded\n");
diff --git a/servers/native/util/histogram.cpp b/servers/native/util/histogram.cpp
index f14d242..7d7d5ac 100644
--- a/servers/native/util/histogram.cpp
+++ b/servers/native/util/histogram.cpp
@@ -47,7 +47,7 @@ int main(int argc, char * argv[]) {
     fprintf(stderr, "Histogram request returned logstore error code %d\n", rcode);
     perror("Histogram generation failed."); return 3;
   } else {
-    datatuple *ret;
+    dataTuple *ret;
     bool first = true;
     while(( ret = logstore_client_next_tuple(l) )) {
       if(first) {
@@ -59,7 +59,7 @@ int main(int argc, char * argv[]) {
         assert(ret->strippedkey()[ret->strippedkeylen()-1] == 0); // check for null terminator.
printf("\t%s\n", (char*)ret->strippedkey()); } - datatuple::freetuple(ret); + dataTuple::freetuple(ret); } } diff --git a/servers/native/util/shutdown.cpp b/servers/native/util/shutdown.cpp index cfba3b6..a9aceb1 100644 --- a/servers/native/util/shutdown.cpp +++ b/servers/native/util/shutdown.cpp @@ -16,12 +16,12 @@ void usage(char * argv[]) { int main(int argc, char * argv[]) { logstore_handle_t * l = util_open_conn(argc, argv); - datatuple * ret = logstore_client_op(l, OP_SHUTDOWN); + dataTuple * ret = logstore_client_op(l, OP_SHUTDOWN); if(ret == NULL) { perror("Shutdown failed."); return 3; } else { - datatuple::freetuple(ret); + dataTuple::freetuple(ret); } logstore_client_close(l); printf("Shutdown in progress\n"); diff --git a/servers/native/util/space_usage.cpp b/servers/native/util/space_usage.cpp index 960f572..17a875b 100644 --- a/servers/native/util/space_usage.cpp +++ b/servers/native/util/space_usage.cpp @@ -30,7 +30,7 @@ void usage(char * argv[]) { int main(int argc, char * argv[]) { logstore_handle_t * l = util_open_conn(argc, argv); - datatuple * ret = logstore_client_op(l, OP_STAT_SPACE_USAGE); + dataTuple * ret = logstore_client_op(l, OP_STAT_SPACE_USAGE); if(ret == NULL) { perror("Space usage failed."); return 3; @@ -40,7 +40,7 @@ int main(int argc, char * argv[]) { assert(ret->rawkeylen() == sizeof(uint64_t)); assert(ret->datalen() == sizeof(uint64_t)); printf("Tree is %llu MB Store file is %llu MB\n", (unsigned long long)(*(uint64_t*)ret->rawkey()) / (1024*1024), (unsigned long long)(*(uint64_t*)ret->data()) / (1024*1024)); - datatuple::freetuple(ret); + dataTuple::freetuple(ret); ; return 0; } diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index bba70e1..e73db0a 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -9,6 +9,6 @@ IF( HAVE_STASIS ) CREATE_CHECK(check_mergelarge) CREATE_CHECK(check_mergetuple) CREATE_CHECK(check_rbtree) - CREATE_CLIENT_EXECUTABLE(check_tcpclient) # XXX should build this on non-stasis machines - CREATE_CLIENT_EXECUTABLE(check_tcpbulkinsert) # XXX should build this on non-stasis machines +# CREATE_CLIENT_EXECUTABLE(check_tcpclient) # XXX should build this on non-stasis machines +# CREATE_CLIENT_EXECUTABLE(check_tcpbulkinsert) # XXX should build this on non-stasis machines ENDIF( HAVE_STASIS ) diff --git a/test/check_datapage.cpp b/test/check_datapage.cpp index 5608523..69f30cd 100644 --- a/test/check_datapage.cpp +++ b/test/check_datapage.cpp @@ -46,7 +46,7 @@ void insertWithConcurrentReads(size_t NUM_ENTRIES) { sync(); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); @@ -67,13 +67,13 @@ void insertWithConcurrentReads(size_t NUM_ENTRIES) { if(data_arr.size() > NUM_ENTRIES) data_arr.erase(data_arr.begin()+NUM_ENTRIES, data_arr.end()); - RegionAllocator * alloc = new RegionAllocator(xid, 10000); // ~ 10 datapages per region. + regionAllocator * alloc = new regionAllocator(xid, 10000); // ~ 10 datapages per region. 
printf("Stage 1: Writing %llu keys\n", (unsigned long long)NUM_ENTRIES); int pcount = 1000; int dpages = 0; - DataPage *dp=0; + dataPage *dp=0; int64_t datasize = 0; std::vector dsp; size_t last_i = 0; @@ -81,7 +81,7 @@ void insertWithConcurrentReads(size_t NUM_ENTRIES) { for(size_t i = 0; i < NUM_ENTRIES; i++) { //prepare the key - datatuple *newtuple = datatuple::create(key_arr[i].c_str(), key_arr[i].length()+1, data_arr[i].c_str(), data_arr[i].length()+1); + dataTuple *newtuple = dataTuple::create(key_arr[i].c_str(), key_arr[i].length()+1, data_arr[i].c_str(), data_arr[i].length()+1); datasize += newtuple->byte_length(); if(dp==NULL || !dp->append(newtuple)) @@ -98,9 +98,9 @@ void insertWithConcurrentReads(size_t NUM_ENTRIES) { delete alloc; Tcommit(xid); xid = Tbegin(); - alloc = new RegionAllocator(xid, 10000); + alloc = new regionAllocator(xid, 10000); - dp = new DataPage(xid, pcount, alloc); + dp = new dataPage(xid, pcount, alloc); // printf("%lld\n", dp->get_start_pid()); bool succ = dp->append(newtuple); assert(succ); @@ -111,13 +111,13 @@ void insertWithConcurrentReads(size_t NUM_ENTRIES) { if(j >= key_arr.size()) { j = key_arr.size()-1; } bool found = 0; { - DataPage::iterator it = dp->begin(); - datatuple * dt; + dataPage::iterator it = dp->begin(); + dataTuple * dt; while((dt = it.getnext()) != NULL) { if(!strcmp((char*)dt->rawkey(), key_arr[j].c_str())) { found = true; } - datatuple::freetuple(dt); + dataTuple::freetuple(dt); } } if(found) { @@ -140,7 +140,7 @@ void insertWithConcurrentReads(size_t NUM_ENTRIES) { printf("Writes complete.\n"); Tcommit(xid); - blsm::deinit_stasis(); + bLSM::deinit_stasis(); } void insertProbeIter(size_t NUM_ENTRIES) @@ -151,7 +151,7 @@ void insertProbeIter(size_t NUM_ENTRIES) sync(); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); @@ -171,7 +171,7 @@ void insertProbeIter(size_t NUM_ENTRIES) if(data_arr.size() > NUM_ENTRIES) data_arr.erase(data_arr.begin()+NUM_ENTRIES, data_arr.end()); - RegionAllocator * alloc = new RegionAllocator(xid, 10000); // ~ 10 datapages per region. + regionAllocator * alloc = new regionAllocator(xid, 10000); // ~ 10 datapages per region. 
printf("Stage 1: Writing %llu keys\n", (unsigned long long)NUM_ENTRIES); struct timeval start, stop; @@ -181,13 +181,13 @@ void insertProbeIter(size_t NUM_ENTRIES) int pcount = 1000; int dpages = 0; - DataPage *dp=0; + dataPage *dp=0; int64_t datasize = 0; std::vector dsp; for(size_t i = 0; i < NUM_ENTRIES; i++) { //prepare the key - datatuple *newtuple = datatuple::create(key_arr[i].c_str(), key_arr[i].length()+1, data_arr[i].c_str(), data_arr[i].length()+1); + dataTuple *newtuple = dataTuple::create(key_arr[i].c_str(), key_arr[i].length()+1, data_arr[i].c_str(), data_arr[i].length()+1); datasize += newtuple->byte_length(); if(dp==NULL || !dp->append(newtuple)) @@ -197,7 +197,7 @@ void insertProbeIter(size_t NUM_ENTRIES) dp->writes_done(); delete dp; - dp = new DataPage(xid, pcount, alloc); + dp = new dataPage(xid, pcount, alloc); bool succ = dp->append(newtuple); assert(succ); @@ -228,15 +228,15 @@ void insertProbeIter(size_t NUM_ENTRIES) int tuplenum = 0; for(int i = 0; i < dpages ; i++) { - DataPage dp(xid, 0, dsp[i]); - DataPage::iterator itr = dp.begin(); - datatuple *dt=0; + dataPage dp(xid, 0, dsp[i]); + dataPage::iterator itr = dp.begin(); + dataTuple *dt=0; while( (dt=itr.getnext()) != NULL) { assert(dt->rawkeylen() == key_arr[tuplenum].length()+1); assert(dt->datalen() == data_arr[tuplenum].length()+1); tuplenum++; - datatuple::freetuple(dt); + dataTuple::freetuple(dt); dt = 0; } @@ -246,7 +246,7 @@ void insertProbeIter(size_t NUM_ENTRIES) Tcommit(xid); - blsm::deinit_stasis(); + bLSM::deinit_stasis(); } diff --git a/test/check_gen.cpp b/test/check_gen.cpp index 484631c..6606d1d 100644 --- a/test/check_gen.cpp +++ b/test/check_gen.cpp @@ -29,18 +29,18 @@ int main(int argc, char **argv) sync(); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); - blsm *ltable = new blsm(1000, 10000, 5); + bLSM *ltable = new bLSM(1000, 10000, 5); recordid table_root = ltable->allocTable(xid); Tcommit(xid); xid = Tbegin(); - RegionAllocator * ro_alloc = new RegionAllocator(); + regionAllocator * ro_alloc = new regionAllocator(); diskTreeComponent::internalNodes::iterator * it = new diskTreeComponent::internalNodes::iterator(xid,ro_alloc, ltable->get_tree_c2()->get_root_rid() ); it->close(); @@ -48,7 +48,7 @@ int main(int argc, char **argv) delete ro_alloc; Tcommit(xid); delete ltable; - blsm::deinit_stasis(); + bLSM::deinit_stasis(); diff --git a/test/check_logtable.cpp b/test/check_logtable.cpp index c2efbeb..4940043 100644 --- a/test/check_logtable.cpp +++ b/test/check_logtable.cpp @@ -44,7 +44,7 @@ void insertProbeIter(size_t NUM_ENTRIES) sync(); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); @@ -78,10 +78,10 @@ void insertProbeIter(size_t NUM_ENTRIES) for(size_t i = 0; i < NUM_ENTRIES; i++) { //prepare the tuple - datatuple* newtuple = datatuple::create(key_arr[i].c_str(), key_arr[i].length()+1, data_arr[i].c_str(), data_arr[i].length()+1); + dataTuple* newtuple = dataTuple::create(key_arr[i].c_str(), key_arr[i].length()+1, data_arr[i].c_str(), data_arr[i].length()+1); ltable_c1->insertTuple(xid, newtuple); - datatuple::freetuple(newtuple); + dataTuple::freetuple(newtuple); } printf("\nTREE STRUCTURE\n"); ltable_c1->print_tree(xid); @@ -99,13 +99,13 @@ void insertProbeIter(size_t NUM_ENTRIES) diskTreeComponent::iterator * tree_itr = ltable_c1->open_iterator(); - datatuple *dt=0; + dataTuple *dt=0; while( (dt=tree_itr->next_callerFrees()) != NULL) { assert(dt->rawkeylen() == key_arr[tuplenum].length()+1); assert(dt->datalen() == data_arr[tuplenum].length()+1); 
     tuplenum++;
-    datatuple::freetuple(dt);
+    dataTuple::freetuple(dt);
     dt = 0;
   }
   delete(tree_itr);
@@ -121,18 +121,18 @@ void insertProbeIter(size_t NUM_ENTRIES)
     //randomly pick a key
     int ri = rand()%key_arr.size();
 
-    datatuple *dt = ltable_c1->findTuple(xid, (const datatuple::key_t) key_arr[ri].c_str(), (size_t)key_arr[ri].length()+1);
+    dataTuple *dt = ltable_c1->findTuple(xid, (const dataTuple::key_t) key_arr[ri].c_str(), (size_t)key_arr[ri].length()+1);
 
     assert(dt!=0);
     assert(dt->rawkeylen() == key_arr[ri].length()+1);
     assert(dt->datalen() == data_arr[ri].length()+1);
-    datatuple::freetuple(dt);
+    dataTuple::freetuple(dt);
     dt = 0;
   }
 
   printf("Random Reads completed.\n");
   Tcommit(xid);
-  blsm::deinit_stasis();
+  bLSM::deinit_stasis();
 }
 
 /** @test
diff --git a/test/check_logtree.cpp b/test/check_logtree.cpp
index 66b4a35..f9c8b4b 100644
--- a/test/check_logtree.cpp
+++ b/test/check_logtree.cpp
@@ -54,7 +54,7 @@ void insertProbeIter_str(int NUM_ENTRIES)
   system("rm -rf stasis_log/");
   sync();
 
-  blsm::init_stasis();
+  bLSM::init_stasis();
 
   int xid = Tbegin();
 
@@ -134,7 +134,7 @@ void insertProbeIter_str(int NUM_ENTRIES)
 
   int64_t count = 0;
-  RegionAllocator * ro_alloc = new RegionAllocator();
+  regionAllocator * ro_alloc = new regionAllocator();
   diskTreeComponent::internalNodes::iterator * it = new diskTreeComponent::internalNodes::iterator(xid, ro_alloc, lt->get_root_rec());
 
   while(it->next()) {
@@ -156,7 +156,7 @@ void insertProbeIter_str(int NUM_ENTRIES)
   delete it;
   delete ro_alloc;
   Tcommit(xid);
-  blsm::deinit_stasis();
+  bLSM::deinit_stasis();
 }
diff --git a/test/check_merge.cpp b/test/check_merge.cpp
index de667ff..d3696d0 100644
--- a/test/check_merge.cpp
+++ b/test/check_merge.cpp
@@ -45,7 +45,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   unlink("logfile.txt");
   system("rm -rf stasis_log/");
 
-  blsm::init_stasis();
+  bLSM::init_stasis();
 
   //data generation
   std::vector * data_arr = new std::vector;
@@ -70,8 +70,8 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
   int xid = Tbegin();
 
-  blsm * ltable = new blsm(10 * 1024 * 1024, 1000, 10000, 5);
-  merge_scheduler mscheduler(ltable);
+  bLSM * ltable = new bLSM(10 * 1024 * 1024, 1000, 10000, 5);
+  mergeScheduler mscheduler(ltable);
 
   recordid table_root = ltable->allocTable(xid);
 
@@ -89,7 +89,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   for(size_t i = 0; i < NUM_ENTRIES; i++)
   {
     //prepare the key
-    datatuple *newtuple = datatuple::create((*key_arr)[i].c_str(), (*key_arr)[i].length()+1,(*data_arr)[i].c_str(), (*data_arr)[i].length()+1);
+    dataTuple *newtuple = dataTuple::create((*key_arr)[i].c_str(), (*key_arr)[i].length()+1,(*data_arr)[i].c_str(), (*data_arr)[i].length()+1);
 
     /*
     printf("key: \t, keylen: %u\ndata:  datalen: %u\n",
@@ -106,7 +106,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     gettimeofday(&ti_end,0);
     insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-    datatuple::freetuple(newtuple);
+    dataTuple::freetuple(newtuple);
   }
   gettimeofday(&stop_tv,0);
 
@@ -132,14 +132,14 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
     //get the key
     uint32_t keylen = (*key_arr)[ri].length()+1;
-    datatuple::key_t rkey = (datatuple::key_t) malloc(keylen);
+    dataTuple::key_t rkey = (dataTuple::key_t) malloc(keylen);
     memcpy((byte*)rkey, (*key_arr)[ri].c_str(), keylen);
 
     //for(int j=0; j
-    datatuple *dt = ltable->findTuple(xid, rkey, keylen);
+    dataTuple *dt = ltable->findTuple(xid, rkey, keylen);
 
     assert(dt!=0);
     //if(dt!=0)
@@ -147,7 +147,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
       found_tuples++;
       assert(dt->rawkeylen() == (*key_arr)[ri].length()+1);
       assert(dt->datalen() ==
             (*data_arr)[ri].length()+1);
-      datatuple::freetuple(dt);
+      dataTuple::freetuple(dt);
     }
     dt = 0;
     free(rkey);
@@ -168,7 +168,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   Tcommit(xid);
   delete ltable;
 
-  blsm::deinit_stasis();
+  bLSM::deinit_stasis();
 }
diff --git a/test/check_mergelarge.cpp b/test/check_mergelarge.cpp
index 89461b6..f7d9fe3 100644
--- a/test/check_mergelarge.cpp
+++ b/test/check_mergelarge.cpp
@@ -43,7 +43,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   unlink("logfile.txt");
   system("rm -rf stasis_log/");
 
-  blsm::init_stasis();
+  bLSM::init_stasis();
 
   //data generation
   //  std::vector * data_arr = new std::vector;
@@ -62,8 +62,8 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
   int xid = Tbegin();
 
-  blsm *ltable = new blsm(10*1024*1024, 1000, 10000, 100);
-  merge_scheduler mscheduler(ltable);
+  bLSM *ltable = new bLSM(10*1024*1024, 1000, 10000, 100);
+  mergeScheduler mscheduler(ltable);
 
   recordid table_root = ltable->allocTable(xid);
 
@@ -85,7 +85,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     getnextdata(ditem, 10*8192);
 
     //prepare the tuple
-    datatuple *newtuple = datatuple::create((*key_arr)[i].c_str(), (*key_arr)[i].length()+1, ditem.c_str(), ditem.length()+1);
+    dataTuple *newtuple = dataTuple::create((*key_arr)[i].c_str(), (*key_arr)[i].length()+1, ditem.c_str(), ditem.length()+1);
 
     datasize += newtuple->byte_length();
 
@@ -94,7 +94,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     gettimeofday(&ti_end,0);
     insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-    datatuple::freetuple(newtuple);
+    dataTuple::freetuple(newtuple);
   }
   gettimeofday(&stop_tv,0);
   printf("insert time: %6.1f\n", insert_time);
@@ -109,7 +109,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   printf("merge threads finished.\n");
   gettimeofday(&stop_tv,0);
   printf("run time: %6.1f\n", (tv_to_double(stop_tv) - tv_to_double(start_tv)));
-  blsm::deinit_stasis();
+  bLSM::deinit_stasis();
 }
diff --git a/test/check_mergetuple.cpp b/test/check_mergetuple.cpp
index 6cbbd2c..68111d6 100644
--- a/test/check_mergetuple.cpp
+++ b/test/check_mergetuple.cpp
@@ -45,7 +45,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
   sync();
 
-  blsm::init_stasis();
+  bLSM::init_stasis();
 
   double delete_freq = .05;
   double update_freq = .15;
@@ -120,8 +120,8 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
   int xid = Tbegin();
 
-  blsm *ltable = new blsm(10 * 1024 * 1024, 1000, 1000, 40);
-  merge_scheduler mscheduler(ltable);
+  bLSM *ltable = new bLSM(10 * 1024 * 1024, 1000, 1000, 40);
+  mergeScheduler mscheduler(ltable);
 
   recordid table_root = ltable->allocTable(xid);
 
@@ -145,7 +145,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     getnextdata(ditem, 8192);
 
     //prepare the key
-    datatuple *newtuple = datatuple::create((*key_arr)[i].c_str(), (*key_arr)[i].length()+1, ditem.c_str(), ditem.length()+1);
+    dataTuple *newtuple = dataTuple::create((*key_arr)[i].c_str(), (*key_arr)[i].length()+1, ditem.c_str(), ditem.length()+1);
 
     datasize += newtuple->byte_length();
 
@@ -154,7 +154,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     gettimeofday(&ti_end,0);
     insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-    datatuple::freetuple(newtuple);
+    dataTuple::freetuple(newtuple);
 
     double rval = ((rand() % 100)+.0)/100;
     if( rval < delete_freq) //delete a key
@@ -164,14 +164,14 @@ void insertProbeIter(size_t NUM_ENTRIES)
       {
         delcount++;
 
-        datatuple *deltuple = datatuple::create((*key_arr)[del_index].c_str(), (*key_arr)[del_index].length()+1);
+        dataTuple *deltuple = dataTuple::create((*key_arr)[del_index].c_str(), (*key_arr)[del_index].length()+1);
 
         gettimeofday(&ti_st,0);
         ltable->insertTuple(deltuple);
         gettimeofday(&ti_end,0);
         insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-        datatuple::freetuple(deltuple);
+        dataTuple::freetuple(deltuple);
 
         del_list.push_back(del_index);
 
@@ -185,14 +185,14 @@ void insertProbeIter(size_t NUM_ENTRIES)
       getnextdata(ditem, 512);
 
       upcount++;
-      datatuple *uptuple = datatuple::create((*key_arr)[up_index].c_str(), (*key_arr)[up_index].length()+1,
+      dataTuple *uptuple = dataTuple::create((*key_arr)[up_index].c_str(), (*key_arr)[up_index].length()+1,
                                              ditem.c_str(), ditem.length()+1);
 
       gettimeofday(&ti_st,0);
       ltable->insertTuple(uptuple);
       gettimeofday(&ti_end,0);
       insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-      datatuple::freetuple(uptuple);
+      dataTuple::freetuple(uptuple);
     }
   }
 
@@ -219,11 +219,11 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
     //get the key
     uint32_t keylen = (*key_arr)[ri].length()+1;
-    datatuple::key_t rkey = (datatuple::key_t) malloc(keylen);
+    dataTuple::key_t rkey = (dataTuple::key_t) malloc(keylen);
     memcpy((byte*)rkey, (*key_arr)[ri].c_str(), keylen);
 
     //find the key with the given tuple
-    datatuple *dt = ltable->findTuple(xid, rkey, keylen);
+    dataTuple *dt = ltable->findTuple(xid, rkey, keylen);
 
     if(std::find(del_list.begin(), del_list.end(), i) == del_list.end())
     {
@@ -231,7 +231,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
       assert(!dt->isDelete());
       found_tuples++;
       assert(dt->rawkeylen() == (*key_arr)[ri].length()+1);
-      datatuple::freetuple(dt);
+      dataTuple::freetuple(dt);
     }
     else
     {
@@ -239,7 +239,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
      {
        assert(dt->rawkeylen() == (*key_arr)[ri].length()+1);
        assert(dt->isDelete());
-       datatuple::freetuple(dt);
+       dataTuple::freetuple(dt);
      }
     }
     dt = 0;
@@ -265,7 +265,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   Tcommit(xid);
   delete ltable;
 
-  blsm::deinit_stasis();
+  bLSM::deinit_stasis();
 }
diff --git a/test/check_rbtree.cpp b/test/check_rbtree.cpp
index 3b62f59..f4380f6 100644
--- a/test/check_rbtree.cpp
+++ b/test/check_rbtree.cpp
@@ -63,7 +63,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
   for(size_t i = 0; i < NUM_ENTRIES; i++)
   {
     //prepare the key
-    datatuple *newtuple = datatuple::create(key_arr[i].c_str(), key_arr[i].length()+1,data_arr[i].c_str(), data_arr[i].length()+1);
+    dataTuple *newtuple = dataTuple::create(key_arr[i].c_str(), key_arr[i].length()+1,data_arr[i].c_str(), data_arr[i].length()+1);
 
     datasize += newtuple->byte_length();
 
@@ -83,14 +83,14 @@ void insertProbeIter(size_t NUM_ENTRIES)
     int ri = i;
 
     //prepare a search tuple
-    datatuple *search_tuple = datatuple::create(key_arr[ri].c_str(), key_arr[ri].length()+1);
+    dataTuple *search_tuple = dataTuple::create(key_arr[ri].c_str(), key_arr[ri].length()+1);
 
     //step 1: look in tree_c0
     memTreeComponent::rbtree_t::iterator rbitr = rbtree.find(search_tuple);
     if(rbitr != rbtree.end())
     {
-      datatuple *tuple = *rbitr;
+      dataTuple *tuple = *rbitr;
       found_tuples++;
       assert(tuple->rawkeylen() == key_arr[ri].length()+1);
@@ -101,7 +101,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
       printf("Not in scratch_tree\n");
     }
 
-    datatuple::freetuple(search_tuple);
+    dataTuple::freetuple(search_tuple);
   }
   printf("found %d\n", found_tuples);
 }
diff --git a/test/check_tcpbulkinsert.cpp b/test/check_tcpbulkinsert.cpp
index c753f04..d9ac080 100644
--- a/test/check_tcpbulkinsert.cpp
+++ b/test/check_tcpbulkinsert.cpp
@@ -145,7 +145,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     getnextdata(ditem, 8192);
     len_t datalen = ditem.length()+1;
 
-    datatuple* newtuple = datatuple::create((*key_arr)[i].c_str(), keylen,
+    dataTuple* newtuple = dataTuple::create((*key_arr)[i].c_str(), keylen,
                                             ditem.c_str(), datalen);
 
     datasize += newtuple->byte_length();
 
@@ -159,7 +159,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     gettimeofday(&ti_end,0);
     insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-    datatuple::freetuple(newtuple);
+    dataTuple::freetuple(newtuple);
 
     if(i % 10000 == 0 && i > 0)
       printf("%llu / %llu inserted.\n", (unsigned long long)i, (unsigned long long)NUM_ENTRIES);
@@ -187,10 +187,10 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
     //get the key
     len_t keylen = (*key_arr)[ri].length()+1;
-    datatuple* searchtuple = datatuple::create((*key_arr)[ri].c_str(), keylen);
+    dataTuple* searchtuple = dataTuple::create((*key_arr)[ri].c_str(), keylen);
 
     //find the key with the given tuple
-    datatuple *dt = logstore_client_op(l, OP_FIND, searchtuple);
+    dataTuple *dt = logstore_client_op(l, OP_FIND, searchtuple);
 
     assert(dt!=0);
     assert(!dt->isDelete());
@@ -198,10 +198,10 @@ void insertProbeIter(size_t NUM_ENTRIES)
     assert(dt->rawkeylen() == (*key_arr)[ri].length()+1);
 
     //free dt
-    datatuple::freetuple(dt);
+    dataTuple::freetuple(dt);
     dt = 0;
 
-    datatuple::freetuple(searchtuple);
+    dataTuple::freetuple(searchtuple);
   }
   printf("found %d\n", found_tuples);
 
@@ -209,13 +209,13 @@ void insertProbeIter(size_t NUM_ENTRIES)
   ret = logstore_client_op_returns_many(l, OP_SCAN, NULL, NULL, 0); // start = NULL  stop = NULL  limit = NONE
   assert(ret == LOGSTORE_RESPONSE_SENDING_TUPLES);
-  datatuple * tup;
+  dataTuple * tup;
   size_t i = 0;
   while((tup = logstore_client_next_tuple(l))) {
     assert(!tup->isDelete());
     assert(tup->rawkeylen() == (*key_arr)[i].length()+1);
     assert(!memcmp(tup->rawkey(), (*key_arr)[i].c_str(), (*key_arr)[i].length()));
-    datatuple::freetuple(tup);
+    dataTuple::freetuple(tup);
     i++;
   }
   assert(i == NUM_ENTRIES);
diff --git a/test/check_tcpclient.cpp b/test/check_tcpclient.cpp
index ad7cb61..65b97b5 100644
--- a/test/check_tcpclient.cpp
+++ b/test/check_tcpclient.cpp
@@ -137,7 +137,7 @@ void insertProbeIter(size_t NUM_ENTRIES)
     getnextdata(ditem, 8192);
     len_t datalen = ditem.length()+1;
 
-    datatuple* newtuple = datatuple::create((*key_arr)[i].c_str(), keylen,
+    dataTuple* newtuple = dataTuple::create((*key_arr)[i].c_str(), keylen,
                                             ditem.c_str(), datalen);
 
     datasize += newtuple->byte_length();
 
@@ -145,13 +145,13 @@ void insertProbeIter(size_t NUM_ENTRIES)
     gettimeofday(&ti_st,0);
 
     //send the data
-    datatuple * ret = logstore_client_op(l, OP_INSERT, newtuple);
+    dataTuple * ret = logstore_client_op(l, OP_INSERT, newtuple);
     assert(ret);
 
     gettimeofday(&ti_end,0);
     insert_time += tv_to_double(ti_end) - tv_to_double(ti_st);
 
-    datatuple::freetuple(newtuple);
+    dataTuple::freetuple(newtuple);
 
     if(i % 10000 == 0 && i > 0)
       printf("%llu / %llu inserted.\n", (unsigned long long)i, (unsigned long long)NUM_ENTRIES);
@@ -177,10 +177,10 @@ void insertProbeIter(size_t NUM_ENTRIES)
 
     //get the key
     len_t keylen = (*key_arr)[ri].length()+1;
-    datatuple* searchtuple = datatuple::create((*key_arr)[ri].c_str(), keylen);
+    dataTuple* searchtuple = dataTuple::create((*key_arr)[ri].c_str(), keylen);
 
     //find the key with the given tuple
-    datatuple *dt = logstore_client_op(l, OP_FIND, searchtuple);
+    dataTuple *dt = logstore_client_op(l, OP_FIND, searchtuple);
 
     assert(dt!=0);
     assert(!dt->isDelete());
@@ -188,10 +188,10 @@ void insertProbeIter(size_t NUM_ENTRIES)
     assert(dt->rawkeylen() == (*key_arr)[ri].length()+1);
 
     //free dt
-    datatuple::freetuple(dt);
+    dataTuple::freetuple(dt);
     dt = 0;
 
-    datatuple::freetuple(searchtuple);
+    dataTuple::freetuple(searchtuple);
   }
   printf("found %d\n", found_tuples);
%d\n", found_tuples); @@ -199,13 +199,13 @@ void insertProbeIter(size_t NUM_ENTRIES) network_op_t ret = logstore_client_op_returns_many(l, OP_SCAN, NULL, NULL, 0); // start = NULL stop = NULL limit = NONE assert(ret == LOGSTORE_RESPONSE_SENDING_TUPLES); - datatuple * tup; + dataTuple * tup; size_t i = 0; while((tup = logstore_client_next_tuple(l))) { assert(!tup->isDelete()); assert(tup->rawkeylen() == (*key_arr)[i].length()+1); assert(!memcmp(tup->rawkey(), (*key_arr)[i].c_str(), (*key_arr)[i].length())); - datatuple::freetuple(tup); + dataTuple::freetuple(tup); i++; } assert(i == NUM_ENTRIES); diff --git a/test/check_testAndSet.cpp b/test/check_testAndSet.cpp index 75603e1..ebd635f 100644 --- a/test/check_testAndSet.cpp +++ b/test/check_testAndSet.cpp @@ -42,7 +42,7 @@ unsigned char vals[NUM_THREADS]; -blsm * ltable; +bLSM * ltable; int myucharcmp(const void * ap, const void * bp) { unsigned char a = *(unsigned char*)ap; @@ -56,11 +56,11 @@ void * worker(void * idp) { while(!succ) { unsigned char key = random() % NUM_THREADS; printf("id = %d key = %d\n", (int)id, (int)key); - datatuple * dt = datatuple::create(&key, sizeof(key), &id, sizeof(id)); - datatuple * dtdelete = datatuple::create(&key, sizeof(key)); + dataTuple * dt = dataTuple::create(&key, sizeof(key), &id, sizeof(id)); + dataTuple * dtdelete = dataTuple::create(&key, sizeof(key)); succ = ltable->testAndSetTuple(dt, dtdelete); - datatuple::freetuple(dt); - datatuple::freetuple(dtdelete); + dataTuple::freetuple(dt); + dataTuple::freetuple(dtdelete); vals[id] = key; } return 0; @@ -73,12 +73,12 @@ void insertProbeIter(size_t NUM_ENTRIES) unlink("logfile.txt"); system("rm -rf stasis_log/"); - blsm::init_stasis(); + bLSM::init_stasis(); int xid = Tbegin(); - ltable = new blsm(10 * 1024 * 1024, 1000, 10000, 5); + ltable = new bLSM(10 * 1024 * 1024, 1000, 10000, 5); - merge_scheduler mscheduler(ltable); + mergeScheduler mscheduler(ltable); recordid table_root = ltable->allocTable(xid); @@ -106,7 +106,7 @@ void insertProbeIter(size_t NUM_ENTRIES) mscheduler.shutdown(); delete ltable; - blsm::deinit_stasis(); + bLSM::deinit_stasis(); printf("\npass\n"); } diff --git a/tuplemerger.cpp b/tuplemerger.cpp index e4487eb..dba6d4a 100644 --- a/tuplemerger.cpp +++ b/tuplemerger.cpp @@ -21,7 +21,7 @@ // t2 is the newer tuple. // we return deletes here. our caller decides what to do with them. 
-datatuple* tuplemerger::merge(const datatuple *t1, const datatuple *t2)
+dataTuple* tupleMerger::merge(const dataTuple *t1, const dataTuple *t2)
 {
   if(!(t1->isDelete() || t2->isDelete())) {
     return (*merge_fp)(t1,t2);
@@ -38,7 +38,7 @@ datatuple* tuplemerger::merge(const datatuple *t1, const datatuple *t2)
  * deletes are handled by the tuplemerger::merge function
  * so here neither t1 nor t2 is a delete datatuple
  **/
-datatuple* append_merger(const datatuple *t1, const datatuple *t2)
+dataTuple* append_merger(const dataTuple *t1, const dataTuple *t2)
 {
   assert(!(t1->isDelete() || t2->isDelete()));
   len_t rawkeylen = t1->rawkeylen();
@@ -47,7 +47,7 @@ datatuple* append_merger(const datatuple *t1, const datatuple *t2)
   memcpy(data, t1->data(), t1->datalen());
   memcpy(data + t1->datalen(), t2->data(), t2->datalen());
 
-  return datatuple::create(t1->rawkey(), rawkeylen, data, datalen);
+  return dataTuple::create(t1->rawkey(), rawkeylen, data, datalen);
 }
 
 /**
@@ -56,7 +56,7 @@ datatuple* append_merger(const datatuple *t1, const datatuple *t2)
  * deletes are handled by the tuplemerger::merge function
  * so here neither t1 nor t2 is a delete datatuple
  **/
-datatuple* replace_merger(const datatuple *t1, const datatuple *t2)
+dataTuple* replace_merger(const dataTuple *t1, const dataTuple *t2)
 {
   return t2->create_copy();
 }
diff --git a/tuplemerger.h b/tuplemerger.h
index 8ea39bd..6ff8be5 100644
--- a/tuplemerger.h
+++ b/tuplemerger.h
@@ -19,26 +19,26 @@
 #ifndef _TUPLE_MERGER_H_
 #define _TUPLE_MERGER_H_
 
-struct datatuple;
+struct dataTuple;
 
-typedef datatuple* (*merge_fn_t) (const datatuple*, const datatuple *);
+typedef dataTuple* (*merge_fn_t) (const dataTuple*, const dataTuple *);
 
-datatuple* append_merger(const datatuple *t1, const datatuple *t2);
-datatuple* replace_merger(const datatuple *t1, const datatuple *t2);
+dataTuple* append_merger(const dataTuple *t1, const dataTuple *t2);
+dataTuple* replace_merger(const dataTuple *t1, const dataTuple *t2);
 
-class tuplemerger
+class tupleMerger
 {
 public:
-    tuplemerger(merge_fn_t merge_fp)
+    tupleMerger(merge_fn_t merge_fp)
     {
         this->merge_fp = merge_fp;
     }
 
-    datatuple* merge(const datatuple *t1, const datatuple *t2);
+    dataTuple* merge(const dataTuple *t1, const dataTuple *t2);
 
 private: