#include <math.h>
#include "merger.h"

#include <stasis/transactional.h>
#undef try
#undef end
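// pthread trampolines: pthread_create() takes a plain C function pointer, so these
// static wrappers cast the opaque argument back to the merge_scheduler and invoke
// the corresponding member function.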
static void* memMerge_thr(void* arg) {
    return ((merge_scheduler*)arg)->memMergeThread();
}
static void* diskMerge_thr(void* arg) {
    return ((merge_scheduler*)arg)->diskMergeThread();
}
merge_scheduler::merge_scheduler(logtable<datatuple> *ltable) : ltable_(ltable), MIN_R(3.0) { }
merge_scheduler::~merge_scheduler() { }

void merge_scheduler::shutdown() {
    ltable_->stop();
    pthread_join(mem_merge_thread_, 0);
    pthread_join(disk_merge_thread_, 0);
}

void merge_scheduler::start() {
    pthread_create(&mem_merge_thread_, 0, memMerge_thr, this);
    pthread_create(&disk_merge_thread_, 0, diskMerge_thr, this);
}
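// Decide whether a tuple should survive this merge.  Tombstones are dropped when
// dropDeletes is set (i.e., when merging into the largest component) or when the table
// reports that no older version can exist below this level (mightBeAfterMemMerge()).
// When an expiry window is configured, tuples older than current_timestamp - expiry
// are discarded as well.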
bool insert_filter(logtable<datatuple> * ltable, datatuple * t, bool dropDeletes) {
    if(t->isDelete()) {
        if(dropDeletes || ! ltable->mightBeAfterMemMerge(t)) {
            return false;
        }
    }
    if(!ltable->expiry) { return true; }
    if(t->timestamp() < ltable->current_timestamp - ltable->expiry) { return false; }
    return true;
}
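// Forward declaration; the merge loop itself is defined at the bottom of this file.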
template <class ITA, class ITB>
void merge_iterators(int xid, diskTreeComponent * forceMe,
                     ITA *itrA,
                     ITB *itrB,
                     logtable<datatuple> *ltable,
                     diskTreeComponent *scratch_tree,
                     mergeStats * stats,
                     bool dropDeletes);
/**
 * Merge algorithm: Outsider's view
 *<pre>
  1: while(1)
  2:    wait for c0_mergeable
  3:    begin
  4:    merge c0_mergeable and c1 into c1'  # Blocks; tree must be consistent at this point
  5:    force c1'                           # Blocks
  6:    if c1' is too big                   # Blocks; tree must be consistent at this point.
  7:       c1_mergeable = c1'
  8:       c1 = new_empty
  8.5:     delete old c1_mergeable          # Happens in other thread (not here)
  9:    else
  10:      c1 = c1'
  11:   c0_mergeable = NULL
  11.5: delete old c0_mergeable
  12:   delete old c1
  13: commit
 </pre>
 Merge algorithm: actual order: 1 2 3 4 5 6 12 11.5 11 [7 8 (9) 10] 13
 */
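// Memory merge thread: repeatedly merges the contents of c0 into c1, producing a new
// on-disk component c1'.  When c1 grows to roughly R times the mean c0 run length, it
// is handed off to the disk merge thread as c1_mergeable.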
void * merge_scheduler::memMergeThread() {
    int xid;

    assert(ltable_->get_tree_c1());

    int merge_count = 0;
    mergeStats * stats = ltable_->merge_mgr->get_merge_stats(1);

    while(true) // 1
    {
        rwlc_writelock(ltable_->header_mut);
        ltable_->merge_mgr->new_merge(1);
        int done = 0;
        // 2: wait for c0_mergeable
        // the merge iterator will wait until c0 is big enough for us to proceed.
        if(!ltable_->is_still_running()) {
            done = 1;
        }
        if(done == 1)
        {
            pthread_cond_signal(&ltable_->c1_ready); // no block is ready. this allows the other thread to wake up, and see that we're shutting down.
            rwlc_unlock(ltable_->header_mut);
            break;
        }
        stats->starting_merge();

        lsn_t merge_start = ltable_->get_log_offset();
        printf("\nstarting memory merge. log offset is %lld\n", merge_start);

        // 3: Begin transaction
        xid = Tbegin();

        // 4: Merge

        // create the iterators
        diskTreeComponent::iterator *itrA = ltable_->get_tree_c1()->open_iterator();
        const int64_t min_bloom_target = ltable_->max_c0_size;
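        // Size the new component's bloom filter from the expected output size.  The last
        // constructor argument appears to be an expected entry count; dividing the byte
        // target by 100 assumes roughly 100-byte tuples (an inference, not a documented
        // contract of diskTreeComponent).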
        // create a new tree
        diskTreeComponent * c1_prime = new diskTreeComponent(xid, ltable_->internal_region_size, ltable_->datapage_region_size, ltable_->datapage_size, stats, (stats->target_size < min_bloom_target ? min_bloom_target : stats->target_size) / 100);

        ltable_->set_tree_c1_prime(c1_prime);

        rwlc_unlock(ltable_->header_mut);
        // needs to be past the rwlc_unlock...
        memTreeComponent<datatuple>::batchedRevalidatingIterator *itrB =
            new memTreeComponent<datatuple>::batchedRevalidatingIterator(ltable_->get_tree_c0(), ltable_->merge_mgr, ltable_->max_c0_size, &ltable_->c0_flushing, 100, &ltable_->rb_mut);

        // do the merge
        DEBUG("mmt:\tMerging:\n");
        merge_iterators<typeof(*itrA),typeof(*itrB)>(xid, c1_prime, itrA, itrB, ltable_, c1_prime, stats, false);

        delete itrA;
        delete itrB;

        // 5: force c1'
        rwlc_writelock(ltable_->header_mut);

        // force write the new tree to disk
        c1_prime->force(xid);

        merge_count++;
        DEBUG("mmt:\tmerge_count %lld #bytes written %lld\n", stats->stats_merge_count, stats->output_size());
        // Immediately clean out c0 mergeable so that writers may continue.

        // first, we need to move the c1' into c1.

        // 12: delete old c1
        ltable_->get_tree_c1()->dealloc(xid);
        delete ltable_->get_tree_c1();

        // 10: c1 = c1'
        ltable_->set_tree_c1(c1_prime);
        ltable_->set_tree_c1_prime(0);

        ltable_->set_c0_is_merging(false);
        double new_c1_size = stats->output_size();
        pthread_cond_signal(&ltable_->c0_needed);

        ltable_->update_persistent_header(xid, merge_start);
        Tcommit(xid);

        ltable_->truncate_log();
        // TODO: this is simplistic for now
        // 6: if c1' is too big, signal the other merger

        // XXX move this to mergeManager, and make bytes_in_small be protected.
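        // Track the average number of bytes consumed from c0 per memory merge.  With
        // frac = 1/merge_count this is the standard incremental running mean: each
        // completed merge's bytes_in_small contributes equally to mean_c0_run_length.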
        if(stats->bytes_in_small) {
            // update c0 effective size.
            double frac = 1.0 / (double)merge_count;
            ltable_->num_c0_mergers = merge_count;
            ltable_->mean_c0_run_length =
                (int64_t) (
                    ((double)ltable_->mean_c0_run_length) * (1 - frac) +
                    ((double)stats->bytes_in_small * frac));
            //ltable_->merge_mgr->get_merge_stats(0)->target_size = ltable_->mean_c0_run_length;
        }
        printf("\nMerge done. R = %f MemSize = %lld Mean = %lld, This = %lld, Count = %d factor %3.3fcur%3.3favg\n",
               *ltable_->R(), (long long)ltable_->max_c0_size, (long long int)ltable_->mean_c0_run_length,
               stats->bytes_in_small, merge_count,
               ((double)stats->bytes_in_small) / (double)ltable_->max_c0_size,
               ((double)ltable_->mean_c0_run_length) / (double)ltable_->max_c0_size);
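        // Hand c1 off to the disk merger once it approaches R times the mean c0 run
        // length; the 1.05 factor makes the handoff trigger slightly early (at roughly
        // 95% of the threshold), which is the hardcoded slack the XXX below complains about.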
        assert(*ltable_->R() >= MIN_R);
        // XXX don't hardcode 1.05, which will break for R > ~20.
        bool signal_c2 = (1.05 * new_c1_size / ltable_->mean_c0_run_length > *ltable_->R());
        DEBUG("\nc1 size %f R %f\n", new_c1_size, *ltable_->R());
        if( signal_c2 )
        {
            DEBUG("mmt:\tsignaling C2 for merge\n");
            DEBUG("mmt:\tnew_c1_size %.2f\tMAX_C0_SIZE %lld\ta->max_size %lld\t targetr %.2f \n", new_c1_size,
                  ltable_->max_c0_size, a->max_size, target_R);
            // XXX need to report backpressure here!
            while(ltable_->get_tree_c1_mergeable()) {
                ltable_->c1_flushing = true;
                rwlc_cond_wait(&ltable_->c1_needed, ltable_->header_mut);
                ltable_->c1_flushing = false;
            }
            xid = Tbegin();

            // we just set c1 = c1'. Want to move c1 -> c1 mergeable, clean out c1.

            // 7: and perhaps c1_mergeable
            ltable_->set_tree_c1_mergeable(ltable_->get_tree_c1()); // c1_prime == c1.
            stats->handed_off_tree();

            // 8: c1 = new empty.
            ltable_->set_tree_c1(new diskTreeComponent(xid, ltable_->internal_region_size, ltable_->datapage_region_size, ltable_->datapage_size, stats, 10));

            pthread_cond_signal(&ltable_->c1_ready);
            ltable_->update_persistent_header(xid);
            Tcommit(xid);
        }
        // DEBUG("mmt:\tUpdated C1's position on disk to %lld\n", ltable_->get_tree_c1()->get_root_rec().page);
        // 13

        rwlc_unlock(ltable_->header_mut);

        ltable_->merge_mgr->finished_merge(1);
        // stats->pretty_print(stdout);

        // TODO: get the freeing outside of the lock
    }

    return 0;
}
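// Disk merge thread: waits for the memory merger to hand off a full c1 (c1_mergeable)
// and merges it into the largest component, c2.  Tombstones are dropped unconditionally
// at this level (dropDeletes is true in the merge_iterators call below).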
void * merge_scheduler::diskMergeThread()
{
    int xid;

    assert(ltable_->get_tree_c2());

    int merge_count = 0;
    mergeStats * stats = ltable_->merge_mgr->get_merge_stats(2);

    while(true)
    {
        // 2: wait for input
        rwlc_writelock(ltable_->header_mut);
        ltable_->merge_mgr->new_merge(2);
        int done = 0;
        // get a new input for merge
        while(!ltable_->get_tree_c1_mergeable())
        {
            pthread_cond_signal(&ltable_->c1_needed);

            if(!ltable_->is_still_running()){
                done = 1;
                break;
            }

            DEBUG("dmt:\twaiting for block ready cond\n");

            rwlc_cond_wait(&ltable_->c1_ready, ltable_->header_mut);

            DEBUG("dmt:\tblock ready\n");
        }
        if(done == 1)
        {
            rwlc_unlock(ltable_->header_mut);
            break;
        }
        stats->starting_merge();

        // 3: begin
        xid = Tbegin();

        // 4: do the merge.
        // create the iterators
        diskTreeComponent::iterator *itrA = ltable_->get_tree_c2()->open_iterator();
        diskTreeComponent::iterator *itrB = ltable_->get_tree_c1_mergeable()->open_iterator(ltable_->merge_mgr, 0.05, &ltable_->c1_flushing);
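        // Size the new c2's bloom filter from the projected output: bytes arriving from
        // c1 (approximately max_c0_size * R) plus the bytes already in c2 (base_size),
        // scaled down by an assumed per-tuple size.  The /1000 divisor is an inference
        // here, mirroring the /100 used when sizing c1'.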
        // create a new tree
        diskTreeComponent * c2_prime = new diskTreeComponent(xid, ltable_->internal_region_size, ltable_->datapage_region_size, ltable_->datapage_size, stats, (uint64_t)(ltable_->max_c0_size * *ltable_->R() + stats->base_size) / 1000);
        // diskTreeComponent * c2_prime = new diskTreeComponent(xid, ltable_->internal_region_size, ltable_->datapage_region_size, ltable_->datapage_size, stats);

        rwlc_unlock(ltable_->header_mut);
        // do the merge
        DEBUG("dmt:\tMerging:\n");

        merge_iterators<typeof(*itrA),typeof(*itrB)>(xid, c2_prime, itrA, itrB, ltable_, c2_prime, stats, true);

        delete itrA;
        delete itrB;

        // 5: force write the new region to disk
        c2_prime->force(xid);

        // (skip 6, 7, 8, 8.5, 9)
        rwlc_writelock(ltable_->header_mut);
        // 12
        ltable_->get_tree_c2()->dealloc(xid);
        delete ltable_->get_tree_c2();
        // 11.5
        ltable_->get_tree_c1_mergeable()->dealloc(xid);
        // 11
        delete ltable_->get_tree_c1_mergeable();
        ltable_->set_tree_c1_mergeable(0);
        // writes complete
        // now atomically replace the old c2 with new c2
        //pthread_mutex_lock(a->block_ready_mut);

        merge_count++;
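        // For a three-component LSM tree, total merge bandwidth is minimized when the size
        // ratios between adjacent components are equal, i.e. |C1|/|C0| = |C2|/|C1|.
        // Setting R = sqrt(|C2| / mean c0 run length) achieves that, clamped below by MIN_R.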
        // update the current optimal R value
        *(ltable_->R()) = std::max(MIN_R, sqrt( ((double)stats->output_size()) / ((double)ltable_->mean_c0_run_length) ) );

        DEBUG("\nR = %f\n", *(ltable_->R()));
        DEBUG("dmt:\tmerge_count %lld\t#written bytes: %lld\n optimal r %.2f", stats->stats_merge_count, stats->output_size(), *(ltable_->R()));
        // 10: C2 is never too big
        ltable_->set_tree_c2(c2_prime);
        stats->handed_off_tree();
        DEBUG("dmt:\tUpdated C2's position on disk to %lld\n", (long long)-1);
        // 13
        ltable_->update_persistent_header(xid);
        Tcommit(xid);

        rwlc_unlock(ltable_->header_mut);
        // stats->pretty_print(stdout);
        ltable_->merge_mgr->finished_merge(2);
    }

    return 0;
}
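// Bound the amount of unforced data during a long merge: once more than FORCE_INTERVAL
// bytes have been written since the last force (tracked by the caller in *i), force the
// partially-built tree component and the log tail, then reset the counter.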
static void periodically_force(int xid, int *i, diskTreeComponent * forceMe, stasis_log_t * log) {
    if(*i > mergeManager::FORCE_INTERVAL) {
        if(forceMe) forceMe->force(xid);
        log->force_tail(log, LOG_FORCE_WAL);
        *i = 0;
    }
}
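// Once tuples from c0 have been merged into c1 they can be removed from the in-memory
// red-black tree.  Deletions are batched (garbage_len at a time) to amortize the cost of
// taking rb_mut; a tuple is only erased if the copy still in c0 is byte-for-byte identical
// to the copy that was merged, so concurrently updated keys are left alone.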
static int garbage_collect(logtable<datatuple> * ltable_, datatuple ** garbage, int garbage_len, int next_garbage, bool force = false) {
    if(next_garbage == garbage_len || force) {
        pthread_mutex_lock(&ltable_->rb_mut);
        for(int i = 0; i < next_garbage; i++) {
            datatuple * t2tmp = NULL;
            {
                memTreeComponent<datatuple>::rbtree_t::iterator rbitr = ltable_->get_tree_c0()->find(garbage[i]);
                if(rbitr != ltable_->get_tree_c0()->end()) {
                    t2tmp = *rbitr;
                    if((t2tmp->datalen() == garbage[i]->datalen()) &&
                       !memcmp(t2tmp->data(), garbage[i]->data(), garbage[i]->datalen())) {
                        // they match, delete t2tmp
                    } else {
                        t2tmp = NULL;
                    }
                }
            } // close rbitr before touching the tree.
            if(t2tmp) {
                ltable_->get_tree_c0()->erase(garbage[i]);
                //ltable_->merge_mgr->get_merge_stats(0)->current_size -= garbage[i]->byte_length();
                datatuple::freetuple(t2tmp);
            }
            datatuple::freetuple(garbage[i]);
        }
        pthread_mutex_unlock(&ltable_->rb_mut);
        return 0;
    } else {
        return next_garbage;
    }
}
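// Core two-way merge: consumes itrB (the smaller, newer component) and itrA (the larger,
// older component) in key order, writing the merged stream into scratch_tree.  When both
// components contain the same key, the two versions are combined by
// ltable->gettuplemerger()->merge().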
template <class ITA, class ITB>
void merge_iterators(int xid,
                     diskTreeComponent * forceMe,
                     ITA *itrA, // iterator on c1 or c2
                     ITB *itrB, // iterator on c0 or c1, respectively
                     logtable<datatuple> *ltable,
                     diskTreeComponent *scratch_tree, mergeStats * stats,
                     bool dropDeletes // should be true iff this is biggest component
                     )
{
    stasis_log_t * log = (stasis_log_t*)stasis_log();
    datatuple *t1 = itrA->next_callerFrees();
    ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1);
    datatuple *t2 = 0;

    int garbage_len = 100;
    int next_garbage = 0;
    datatuple ** garbage = (datatuple**)malloc(sizeof(garbage[0]) * garbage_len);

    int i = 0;
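    // Outer loop: one iteration per tuple in the smaller component.  For each t2 we first
    // drain every t1 with a smaller key, then either merge t1 with t2 (equal keys) or emit
    // t2 on its own.  'i' counts bytes written since the last periodic force.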
    while( (t2=itrB->next_callerFrees()) != 0)
    {
        ltable->merge_mgr->read_tuple_from_small_component(stats->merge_level, t2);

        DEBUG("tuple\t%lld: keylen %d datalen %d\n",
              ntuples, *(t2->keylen), *(t2->datalen) );
        while(t1 != 0 && datatuple::compare(t1->rawkey(), t1->rawkeylen(), t2->rawkey(), t2->rawkeylen()) < 0) // t1 is less than t2
        {
            //insert t1
            if(insert_filter(ltable, t1, dropDeletes)) {
                scratch_tree->insertTuple(xid, t1);
                i += t1->byte_length();
                ltable->merge_mgr->wrote_tuple(stats->merge_level, t1);
            }
            datatuple::freetuple(t1);

            //advance itrA
            t1 = itrA->next_callerFrees();
            ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1);

            periodically_force(xid, &i, forceMe, log);
        }
        if(t1 != 0 && datatuple::compare(t1->strippedkey(), t1->strippedkeylen(), t2->strippedkey(), t2->strippedkeylen()) == 0)
        {
            datatuple *mtuple = ltable->gettuplemerger()->merge(t1,t2);
            stats->merged_tuples(mtuple, t2, t1); // this looks backwards, but is right.

            //insert merged tuple, drop deletes
            if(insert_filter(ltable, mtuple, dropDeletes)) {
                scratch_tree->insertTuple(xid, mtuple);
                i += mtuple->byte_length();
                ltable->merge_mgr->wrote_tuple(stats->merge_level, mtuple);
            }
            datatuple::freetuple(t1);
            t1 = itrA->next_callerFrees(); //advance itrA
            ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1);
            datatuple::freetuple(mtuple);
            periodically_force(xid, &i, forceMe, log);
        }
        else
        {
            //insert t2
            if(insert_filter(ltable, t2, dropDeletes)) {
                scratch_tree->insertTuple(xid, t2);
                i += t2->byte_length();
                ltable->merge_mgr->wrote_tuple(stats->merge_level, t2);
            }
            periodically_force(xid, &i, forceMe, log);
            // cannot free any tuples here; they may still be read through a lookup
        }
        if(stats->merge_level == 1) {
            // We consume tuples from c0 as we read them, so update its stats here.
            ltable->merge_mgr->wrote_tuple(0, t2);

            next_garbage = garbage_collect(ltable, garbage, garbage_len, next_garbage);
            garbage[next_garbage] = t2;
            next_garbage++;
        }
        if(stats->merge_level != 1) {
            datatuple::freetuple(t2);
        }
    }
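    // itrB is exhausted; drain whatever remains in the larger component.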
    while(t1 != 0) { // t2 is empty, but t1 still has stuff in it.
        if(insert_filter(ltable, t1, dropDeletes)) {
            scratch_tree->insertTuple(xid, t1);
            ltable->merge_mgr->wrote_tuple(stats->merge_level, t1);
            i += t1->byte_length();
        }
        datatuple::freetuple(t1);

        //advance itrA
        t1 = itrA->next_callerFrees();
        ltable->merge_mgr->read_tuple_from_large_component(stats->merge_level, t1);
        periodically_force(xid, &i, forceMe, log);
    }
    DEBUG("dpages: %d\tnpages: %d\tntuples: %d\n", dpages, npages, ntuples);

    next_garbage = garbage_collect(ltable, garbage, garbage_len, next_garbage, true);
    free(garbage);

    scratch_tree->writes_done();
}