From 347a4126f30760c611bf01e2f826b7416f1c0e85 Mon Sep 17 00:00:00 2001
From: sears
Date: Sat, 11 Dec 2010 01:04:33 +0000
Subject: [PATCH] remove the NO_SNOWSHOVEL c-preprocessor stuff. There is no way it was working anyway...

git-svn-id: svn+ssh://svn.corp.yahoo.com/yahoo/yrl/labs/pnuts/code/logstore@1480 8dad8b1f-cf64-0410-95b6-bcf113ffbcfe
---
 logstore.cpp     | 31 ----------------------
 mergeManager.cpp | 28 +++-----------------
 merger.cpp       | 68 ++----------------------------------------------
 3 files changed, 5 insertions(+), 122 deletions(-)

diff --git a/logstore.cpp b/logstore.cpp
index dabe1ad..ec1fcd2 100644
--- a/logstore.cpp
+++ b/logstore.cpp
@@ -176,9 +176,6 @@ void logtable::flushTable()
   gettimeofday(&start_tv,0);
   start = tv_to_double(start_tv);
 
-#ifdef NO_SNOWSHOVEL
-  merge_mgr->finished_merge(0); // XXX will deadlock..
-#endif
   flushing = true;
 
   bool blocked = false;
@@ -187,11 +184,7 @@ void logtable::flushTable()
 
   //this waits for the previous merger of the mem-tree
   //hopefullly this wont happen
-#ifdef NO_SNOWSHOVEL
-  while(get_tree_c0_mergeable()) {
-#else
   while(get_c0_is_merging()) {
-#endif
     rwlc_cond_wait(&c0_needed, header_mut);
     blocked = true;
     if(expmcount != merge_count) {
@@ -205,16 +198,10 @@
 
   gettimeofday(&stop_tv,0);
   stop = tv_to_double(stop_tv);
-#ifdef NO_SNOWSHOVEL
-  set_tree_c0_mergeable(get_tree_c0());
-#endif
   pthread_cond_signal(&c0_ready);
   DEBUG("Signaled c0-c1 merge thread\n");
 
   merge_count ++;
-#ifdef NO_SNOWSHOVEL
-  set_tree_c0(new memTreeComponent::rbtree_t);
-#endif
   c0_stats->starting_merge();
 
   tsize = 0;
@@ -540,23 +527,6 @@ datatuple * logtable::insertTupleHelper(datatuple *tuple)
   }
   merge_mgr->wrote_tuple(0, t); // needs to be here; doesn't grab a mutex.
 
-#ifdef NO_SNOWSHOVEL
-  //flushing logic
-  if(tree_bytes >= max_c0_size )
-  {
-    DEBUG("tree size before merge %d tuples %lld bytes.\n", tsize, tree_bytes);
-
-    // NOTE: we hold rb_mut across the (blocking on merge) flushTable. Therefore:
-    // *** Blocking in flushTable is REALLY BAD ***
-    // Because it blocks readers and writers.
-    // The merge policy does its best to make sure flushTable does not block.
-    rwlc_writelock(header_mut);
-    // the test of tree size needs to be atomic with the flushTable, and flushTable needs a writelock.
-    if(tree_bytes >= max_c0_size) {
-      flushTable(); // this needs to hold rb_mut if snowshoveling is disabled, but can't hold rb_mut if snowshoveling is enabled.
-    }
-    rwlc_unlock(header_mut);
-#endif
   return pre_t;
 }
 template
@@ -590,7 +560,6 @@ void logtable::insertTuple(datatuple *tuple)
   pre_t = insertTupleHelper(tuple);
 
   pthread_mutex_unlock(&rb_mut);
-  // XXX is it OK to move this after the NO_SNOWSHOVEL block?
   if(pre_t) {
     // needs to be here; calls update_progress, which sometimes grabs mutexes..
     merge_mgr->read_tuple_from_large_component(0, pre_t); // was interspersed with the erase, insert above...
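
The block removed from insertTupleHelper() above implemented a double-checked flush trigger: test tree_bytes against max_c0_size without the header lock, then take the write lock and re-test before calling flushTable(), so the size test is atomic with the flush but the lock is only taken when C0 looks full. A minimal sketch of that pattern follows; it is illustrative only, and the names (MemTable, note_insert, size_bytes, header_lock) are hypothetical rather than taken from logstore.

// Hypothetical sketch of the double-checked flush trigger the removed
// NO_SNOWSHOVEL block used; logstore itself works with tree_bytes,
// max_c0_size, header_mut and flushTable().
#include <atomic>
#include <mutex>

struct MemTable {
  std::atomic<long> size_bytes{0};
  const long max_size = 100 * 1024 * 1024;  // stand-in for max_c0_size
  std::mutex header_lock;                   // stand-in for the rwlc header mutex

  void flush() { size_bytes = 0; }          // stand-in for flushTable()

  void note_insert(long tuple_bytes) {
    size_bytes += tuple_bytes;
    if (size_bytes.load() < max_size) return;     // cheap, lock-free first test
    std::lock_guard<std::mutex> g(header_lock);   // slow path: take the header lock
    if (size_bytes.load() >= max_size) {          // re-test; another thread may have flushed
      flush();
    }
  }
};

The re-test under the lock is what makes the size test atomic with the flush, which is the property the removed NOTE comment insists on; the cost is that a writer can block behind a slow flush, which is exactly why that comment warns that blocking in flushTable() is "REALLY BAD".
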
diff --git a/mergeManager.cpp b/mergeManager.cpp
index 47de3d8..d3f410b 100644
--- a/mergeManager.cpp
+++ b/mergeManager.cpp
@@ -75,15 +75,6 @@ void mergeManager::sleep_on_mini_delta(mergeStats *s, int delta) {
 }
 
 void mergeManager::update_progress(mergeStats * s, int delta) {
   s->delta += delta;
-#if 0
-#ifdef NO_SNOWSHOVEL
-  if(merge_level < 2 && delta) {
-#else
-  if(merge_level == 1 && delta) {
-#endif
-    sleep_on_mini_delta(s, delta);
-  }
-#endif
   if((!delta) || s->delta > UPDATE_PROGRESS_DELTA) {
     rwlc_writelock(ltable->header_mut);
@@ -92,9 +83,6 @@ void mergeManager::update_progress(mergeStats * s, int delta) {
       if(!s->need_tick) { s->need_tick = 1; }
     }
     if(s->merge_level == 2
-#ifdef NO_SNOWSHOVEL
-       || s->merge_level == 1
-#endif
        ) {
       if(s->active) {
         s->in_progress = ((double)(s->bytes_in_large + s->bytes_in_small)) / (double)(get_merge_stats(s->merge_level-1)->mergeable_size + s->base_size);
@@ -104,22 +92,17 @@ void mergeManager::update_progress(mergeStats * s, int delta) {
     } else if(s->merge_level == 1) { // C0-C1 merge (c0 is continuously growing...)
       if(s->active) {
         s->in_progress = ((double)(s->bytes_in_large+s->bytes_in_small)) / (double)(s->base_size+ltable->mean_c0_effective_size);
-//        if(s->in_progress > 0.95) { s->in_progress = 0.95; }
-//        assert(s->in_progress > -0.01 && s->in_progress < 1.02);
       } else {
         s->in_progress = 0;
       }
     }
 
-#ifdef NO_SNOWSHOVEL
-    s->current_size = s->base_size + s->bytes_out - s->bytes_in_large;
-#else
     if(s->merge_level == 0) {
       s->current_size = ltable->tree_bytes; // we need to track the number of bytes consumed by the merger; this data is not present in s, so fall back on ltable's aggregate.
     } else {
       s->current_size = s->base_size + s->bytes_out - s->bytes_in_large;
     }
-#endif
+
     s->out_progress = ((double)s->current_size) / (double)s->target_size;
     struct timeval now;
     gettimeofday(&now, 0);
@@ -270,12 +253,7 @@ void mergeManager::tick_based_on_merge_progress(mergeStats *s) {
  */
 
 void mergeManager::tick(mergeStats * s) {
   if(s->need_tick) {
-#ifdef NO_SNOWSHOVEL
-    bool snowshovel = false;
-#else
-    bool snowshovel = true;
-#endif
-    if((!snowshovel) || s->merge_level == 1) { // apply backpressure based on merge progress.
+    if(s->merge_level == 1) { // apply backpressure based on merge progress.
       tick_based_on_merge_progress(s);
     } else if(s->merge_level == 0) { // Simple backpressure algorithm based on how full C0 is.
@@ -437,7 +415,7 @@ void mergeManager::pretty_print(FILE * out) {
 //           ((double)c1_totalConsumed)/((double)c1_totalWorktime),
 //           ((double)c2_totalConsumed)/((double)c2_totalWorktime));
   fflush(out);
-#ifdef NO_SNOWSHOVEL
+#if 0 // XXX would like to bring this back somehow...
   assert((!c1->active) || (c1->in_progress >= -0.01 && c1->in_progress < 1.02));
 #endif
   assert((!c2->active) || (c2->in_progress >= -0.01 && c2->in_progress < 1.10));
diff --git a/merger.cpp b/merger.cpp
index 02cec26..30f4255 100644
--- a/merger.cpp
+++ b/merger.cpp
@@ -72,30 +72,10 @@ void * merge_scheduler::memMergeThread() {
     ltable_->merge_mgr->new_merge(1);
     int done = 0;
     // 2: wait for c0_mergable
-#ifdef NO_SNOWSHOVEL
-    while(!ltable_->get_tree_c0_mergeable())
-    {
-      pthread_cond_signal(&ltable_->c0_needed);
-
-      if(!ltable_->is_still_running()){
-        done = 1;
-        break;
-      }
-
-      DEBUG("mmt:\twaiting for block ready cond\n");
-
-      rwlc_cond_wait(&ltable_->c0_ready, ltable_->header_mut);
-
-      DEBUG("mmt:\tblock ready\n");
-
-    }
-#else
     // the merge iterator will wait until c0 is big enough for us to proceed.
     if(!ltable_->is_still_running()) {
       done = 1;
     }
-#endif
-
     if(done==1) {
       pthread_cond_signal(&ltable_->c1_ready); // no block is ready. this allows the other thread to wake up, and see that we're shutting down.
 
@@ -112,15 +92,6 @@ void * merge_scheduler::memMergeThread() {
 
     //create the iterators
     diskTreeComponent::iterator *itrA = ltable_->get_tree_c1()->open_iterator();
-#ifdef NO_SNOWSHOVEL
-    memTreeComponent::iterator *itrB =
-        new memTreeComponent::iterator(ltable_->get_tree_c0_mergeable());
-#else
-//    memTreeComponent::revalidatingIterator *itrB =
-//        new memTreeComponent::revalidatingIterator(ltable_->get_tree_c0(), &ltable_->rb_mut);
-//    memTreeComponent::batchedRevalidatingIterator *itrB =
-//        new memTreeComponent::batchedRevalidatingIterator(ltable_->get_tree_c0(), &ltable_->tree_bytes, ltable_->max_c0_size, &ltable_->flushing, 100, &ltable_->rb_mut);
-#endif
     const int64_t min_bloom_target = ltable_->max_c0_size;
 
     //create a new tree
@@ -129,11 +100,11 @@ void * merge_scheduler::memMergeThread() {
 
     ltable_->set_tree_c1_prime(c1_prime);
 
     rwlc_unlock(ltable_->header_mut);
-#ifndef NO_SNOWSHOVEL
+    // needs to be past the rwlc_unlock...
     memTreeComponent::batchedRevalidatingIterator *itrB =
         new memTreeComponent::batchedRevalidatingIterator(ltable_->get_tree_c0(), &ltable_->tree_bytes, ltable_->max_c0_size, &ltable_->flushing, 100, &ltable_->rb_mut);
-#endif
+
     //: do the merge
     DEBUG("mmt:\tMerging:\n");
 
@@ -164,12 +135,6 @@ void * merge_scheduler::memMergeThread() {
 
     ltable_->set_tree_c1(c1_prime);
     ltable_->set_tree_c1_prime(0);
-#ifdef NO_SNOWSHOVEL
-    // 11.5: delete old c0_mergeable
-    memTreeComponent::tearDownTree(ltable_->get_tree_c0_mergeable());
-    // 11: c0_mergeable = NULL
-    ltable_->set_tree_c0_mergeable(NULL);
-#endif
     ltable_->set_c0_is_merging(false);
     double new_c1_size = stats->output_size();
     pthread_cond_signal(&ltable_->c0_needed);
@@ -289,11 +254,7 @@ void * merge_scheduler::diskMergeThread()
     // 4: do the merge.
     //create the iterators
     diskTreeComponent::iterator *itrA = ltable_->get_tree_c2()->open_iterator();
-#ifdef NO_SNOWSHOVEL
-    diskTreeComponent::iterator *itrB = ltable_->get_tree_c1_mergeable()->open_iterator();
-#else
     diskTreeComponent::iterator *itrB = ltable_->get_tree_c1_mergeable()->open_iterator(&ltable_->merge_mgr->cur_c1_c2_progress_delta, 0.05, &ltable_->shutting_down_);
-#endif
 
     //create a new tree
     diskTreeComponent * c2_prime = new diskTreeComponent(xid, ltable_->internal_region_size, ltable_->datapage_region_size, ltable_->datapage_size, stats, (ltable_->max_c0_size * *ltable_->R() + stats->base_size)/ 1000);
@@ -463,39 +424,14 @@ void merge_iterators(int xid,
       periodically_force(xid, &i, forceMe, log);
       // cannot free any tuples here; they may still be read through a lookup
     }
 
-#ifndef NO_SNOWSHOVEL
     if(stats->merge_level == 1) {
       next_garbage = garbage_collect(ltable, garbage, garbage_len, next_garbage);
       garbage[next_garbage] = t2;
       next_garbage++;
     }
 
-#if 0
-    pthread_mutex_lock(&ltable->rb_mut);
-    if(stats->merge_level == 1) {
-      datatuple * t2tmp = NULL;
-      {
-        memTreeComponent::rbtree_t::iterator rbitr = ltable->get_tree_c0()->find(t2);
-        if(rbitr != ltable->get_tree_c0()->end()) {
-          t2tmp = *rbitr;
-          if((t2tmp->datalen() == t2->datalen()) &&
-             !memcmp(t2tmp->data(), t2->data(), t2->datalen())) {
-          }
-        }
-      }
-      if(t2tmp) {
-        ltable->get_tree_c0()->erase(t2);
-        ltable->tree_bytes -= t2->byte_length();
-        datatuple::freetuple(t2tmp);
-      }
-    }
-    pthread_mutex_unlock(&ltable->rb_mut);
-#endif
     if(stats->merge_level != 1) {
       datatuple::freetuple(t2);
    }
-#else
-    datatuple::freetuple(t2);
-#endif
   }
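
With the NO_SNOWSHOVEL path gone, the surviving C0-C1 merge reads directly from the live C0 tree through memTreeComponent::batchedRevalidatingIterator while writers keep inserting, instead of waiting for a frozen c0_mergeable snapshot and signalling c0_needed/c0_ready around it. The sketch below shows that producer/consumer shape with a single mutex and condition variable; it is a simplification under assumed names (MemTree, next_batch, low_water), and unlike the real iterator, which revalidates its position and leaves tuple reclamation to garbage_collect() in merge_iterators(), it simply removes entries as it drains them.

// Hypothetical sketch of the "snowshovel" shape: writers append to the live
// in-memory tree while the merge thread drains it in small batches.
#include <condition_variable>
#include <map>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

struct MemTree {
  std::map<std::string, std::string> c0;   // stands in for the C0 red-black tree
  long bytes = 0;                          // stands in for tree_bytes
  bool flushing = false;                   // set when a caller wants C0 drained now
  std::mutex mut;                          // stands in for rb_mut
  std::condition_variable cv;

  void insert(const std::string& k, const std::string& v) {
    std::lock_guard<std::mutex> g(mut);
    bytes += (long)(k.size() + v.size());
    c0[k] = v;
    cv.notify_all();                       // wake the merger if it is waiting for data
  }

  // Hand the merger up to batch_size entries; block while C0 is small and
  // nothing has asked for a flush.
  std::vector<std::pair<std::string, std::string>> next_batch(size_t batch_size,
                                                              long low_water) {
    std::unique_lock<std::mutex> g(mut);
    cv.wait(g, [&] { return bytes >= low_water || flushing; });
    std::vector<std::pair<std::string, std::string>> out;
    while (!c0.empty() && out.size() < batch_size) {
      auto it = c0.begin();
      bytes -= (long)(it->first.size() + it->second.size());
      out.emplace_back(it->first, it->second);
      c0.erase(it);
    }
    return out;                            // may be empty if a flush raced the writers
  }
};

The low_water test is the analogue of the merge iterator waiting "until c0 is big enough for us to proceed", and the flushing flag plays the role flushTable() plays above: it lets a flush force a drain even when C0 is still small.
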