From 99bfa2a3b8df8607a7849c05419855ed441589f3 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Mon, 6 Apr 2015 14:16:20 +0900 Subject: [PATCH 01/22] Import of machi_chain_manager1.erl and friends; tests broken --- include/machi_chain_manager.hrl | 41 + src/machi_chain_manager1.erl | 1547 +++++++++++++++++++++++++++ test/machi_chain_manager1_pulse.erl | 379 +++++++ test/machi_chain_manager1_test.erl | 589 ++++++++++ test/machi_partition_simulator.erl | 239 +++++ 5 files changed, 2795 insertions(+) create mode 100644 include/machi_chain_manager.hrl create mode 100644 src/machi_chain_manager1.erl create mode 100644 test/machi_chain_manager1_pulse.erl create mode 100644 test/machi_chain_manager1_test.erl create mode 100644 test/machi_partition_simulator.erl diff --git a/include/machi_chain_manager.hrl b/include/machi_chain_manager.hrl new file mode 100644 index 0000000..7a100b1 --- /dev/null +++ b/include/machi_chain_manager.hrl @@ -0,0 +1,41 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-define(NOT_FLAPPING, {0,0,0}). + +-type projection() :: #projection_v1{}. + +-record(ch_mgr, { + init_finished :: boolean(), + name :: pv1_server(), + proj :: projection(), + proj_history :: queue(), + myflu :: pid() | atom(), + flap_limit :: non_neg_integer(), + %% + runenv :: list(), %proplist() + opts :: list(), %proplist() + flaps=0 :: integer(), + flap_start = ?NOT_FLAPPING + :: erlang:now(), + + %% Deprecated ... TODO: remove when old test unit test code is removed + proj_proposed :: 'none' | projection() + }). diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl new file mode 100644 index 0000000..5f662f6 --- /dev/null +++ b/src/machi_chain_manager1.erl @@ -0,0 +1,1547 @@ +%% ------------------------------------------------------------------- +%% +%% Machi: a small village of replicated files +%% +%% Copyright (c) 2014-2015 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(machi_chain_manager1). + +%% TODO: I am going to sever the connection between the flowchart and the +%% code. 
That diagram is really valuable, but it also takes a long time +%% to make any kind of edit; the process is too slow. This is a todo +%% item a reminder that the flowchart is important documentation and +%% must be brought back into sync with the code soon. + +-behaviour(gen_server). + +-include("machi_projection.hrl"). +-include("machi_chain_manager.hrl"). + +-define(D(X), io:format(user, "~s ~p\n", [??X, X])). +-define(Dw(X), io:format(user, "~s ~w\n", [??X, X])). + +%% Keep a history of our flowchart execution in the process dictionary. +-define(REACT(T), put(react, [T|get(react)])). + +%% API +-export([start_link/3, start_link/4, stop/1, ping/1]). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-export([make_projection_summary/1, projection_transitions_are_sane/2]). + +-ifdef(TEST). + +-export([test_calc_projection/2, + test_calc_proposed_projection/1, + test_write_proposed_projection/1, + test_read_latest_public_projection/2, + test_react_to_env/1, + get_all_hosed/1]). + +-ifdef(EQC). +-include_lib("eqc/include/eqc.hrl"). +-endif. +-ifdef(PULSE). +-compile({parse_transform, pulse_instrument}). +-endif. + +-include_lib("eunit/include/eunit.hrl"). +-compile(export_all). +-endif. %TEST + +start_link(MyName, All_list, MyFLUPid) -> + start_link(MyName, All_list, MyFLUPid, []). + +start_link(MyName, All_list, MyFLUPid, MgrOpts) -> + gen_server:start_link(?MODULE, {MyName, All_list, MyFLUPid, MgrOpts}, []). + +stop(Pid) -> + gen_server:call(Pid, {stop}, infinity). + +ping(Pid) -> + gen_server:call(Pid, {ping}, infinity). + +-ifdef(TEST). + +%% Test/debugging code only. + +test_write_proposed_projection(Pid) -> + gen_server:call(Pid, {test_write_proposed_projection}, infinity). + +%% Calculate a projection and return it to us. +%% If KeepRunenvP is true, the server will retain its change in its +%% runtime environment, e.g., changes in simulated network partitions. +%% The server's internal proposed projection is not altered. +test_calc_projection(Pid, KeepRunenvP) -> + gen_server:call(Pid, {test_calc_projection, KeepRunenvP}, infinity). + +%% Async! +%% The server's internal proposed projection *is* altered. +test_calc_proposed_projection(Pid) -> + gen_server:cast(Pid, {test_calc_proposed_projection}). + +test_read_latest_public_projection(Pid, ReadRepairP) -> + gen_server:call(Pid, {test_read_latest_public_projection, ReadRepairP}, + infinity). + +test_react_to_env(Pid) -> + gen_server:call(Pid, {test_react_to_env}, infinity). + +-endif. % TEST + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +init({MyName, All_list, MyFLUPid, MgrOpts}) -> + RunEnv = [%% {seed, Seed}, + {seed, now()}, + {network_partitions, []}, + {network_islands, []}, + {flapping_i, []}, + {up_nodes, not_init_yet}], + BestProj = make_initial_projection(MyName, All_list, All_list, + [], []), + NoneProj = make_initial_projection(MyName, All_list, [], + [], []), + S = #ch_mgr{init_finished=false, + name=MyName, + proj=NoneProj, + proj_history=queue:new(), + myflu=MyFLUPid, % pid or atom local name + %% TODO 2015-03-04: revisit, should this constant be bigger? + %% Yes, this should be bigger, but it's a hack. There is + %% no guarantee that all parties will advance to a minimum + %% flap awareness in the amount of time that this mgr will. 
+ flap_limit=length(All_list) + 50, + runenv=RunEnv, + opts=MgrOpts}, + + %% TODO: There is a bootstrapping problem there that needs to be + %% solved eventually: someone/something needs to set the initial + %% state for the chain. + %% + %% The PoC hack here will set the chain to all members. That may + %% be fine for testing purposes, but it won't work for real life. + %% For example, if chain C has been running with [a,b] for a + %% while, then we start c. We don't want c to immediately say, + %% hey, let's do [a,b,c] immediately ... UPI invariant requires + %% repair, etc. etc. + + self() ! {finish_init, BestProj}, + {ok, S}. + +handle_call(_Call, _From, #ch_mgr{init_finished=false} = S) -> + {reply, not_initialized, S}; +handle_call({test_write_proposed_projection}, _From, S) -> + if S#ch_mgr.proj_proposed == none -> + {reply, none, S}; + true -> + {Res, S2} = do_cl_write_proposed_proj(S), + {reply, Res, S2} + end; +handle_call({ping}, _From, S) -> + {reply, pong, S}; +handle_call({stop}, _From, S) -> + {stop, normal, ok, S}; +handle_call({test_calc_projection, KeepRunenvP}, _From, + #ch_mgr{name=MyName}=S) -> + RelativeToServer = MyName, + {P, S2} = calc_projection(S, RelativeToServer), + {reply, {ok, P}, if KeepRunenvP -> S2; + true -> S + end}; +handle_call({test_read_latest_public_projection, ReadRepairP}, _From, S) -> + {Perhaps, Val, ExtraInfo, S2} = + do_cl_read_latest_public_projection(ReadRepairP, S), + Res = {Perhaps, Val, ExtraInfo}, + {reply, Res, S2}; +handle_call({test_react_to_env}, _From, S) -> + {TODOtodo, S2} = do_react_to_env(S), + {reply, TODOtodo, S2}; +handle_call(_Call, _From, S) -> + {reply, whaaaaaaaaaa, S}. + +handle_cast(_Cast, #ch_mgr{init_finished=false} = S) -> + {noreply, S}; +handle_cast({test_calc_proposed_projection}, #ch_mgr{name=MyName}=S) -> + RelativeToServer = MyName, + {Proj, S2} = calc_projection(S, RelativeToServer), + {noreply, S2#ch_mgr{proj_proposed=Proj}}; +handle_cast(_Cast, S) -> + ?D({cast_whaaaaaaaaaaa, _Cast}), + {noreply, S}. + +handle_info({finish_init, BestProj}, S) -> + S2 = finish_init(BestProj, S), + {noreply, S2}; +handle_info(Msg, S) -> + exit({bummer, Msg}), + {noreply, S}. + +terminate(_Reason, _S) -> + ok. + +code_change(_OldVsn, S, _Extra) -> + {ok, S}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +finish_init(BestProj, #ch_mgr{init_finished=false, myflu=MyFLU} = S) -> + case machi_flu0:proj_read_latest(MyFLU, private) of + error_unwritten -> + Epoch = BestProj#projection_v1.epoch_number, + case machi_flu0:proj_write(MyFLU, Epoch, private, BestProj) of + ok -> + S#ch_mgr{init_finished=true, proj=BestProj}; + error_written -> + exit({yo_impossible, ?LINE}); + Else -> + ?D({retry,Else}), + timer:sleep(100), + finish_init(BestProj, S) + end; + {ok, Proj} -> + S#ch_mgr{init_finished=true, proj=Proj}; + Else -> + ?D({todo, fix_up_eventually, Else}), + exit({yo_weird, Else}) + end. + +do_cl_write_proposed_proj(#ch_mgr{proj_proposed=Proj} = S) -> + #projection_v1{epoch_number=Epoch} = Proj, + case cl_write_public_proj(Epoch, Proj, S) of + {ok, _S2}=Res -> + Res; + {_Other2, _S2}=Else2 -> + Else2 + end. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +cl_write_public_proj(Epoch, Proj, S) -> + cl_write_public_proj(Epoch, Proj, false, S). + +cl_write_public_proj_skip_local_error(Epoch, Proj, S) -> + cl_write_public_proj(Epoch, Proj, true, S). 
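The two wrappers above select between "stop on a local write error" and "keep going regardless"; the second flavor is what read repair uses. A minimal, self-contained sketch of that local-first fan-out pattern (function and variable names here are illustrative, not part of the patch):

    %% WriteFun(Store) performs one projection-store write and returns
    %% ok or an error atom such as error_written or t_timeout.
    write_local_then_remote(Local, Remotes, WriteFun, SkipLocalErrorP) ->
        Fanout = fun() -> [{R, WriteFun(R)} || R <- Remotes] end,
        case WriteFun(Local) of
            ok ->
                {{local_write_result, ok}, Fanout()};
            Error when SkipLocalErrorP ->
                %% Repair path: remote stores still get the value, and the
                %% read side reconciles any divergence later.
                {{local_write_result, Error}, Fanout()};
            Error ->
                {Error, no_remote_writes}
        end.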
+ +cl_write_public_proj(Epoch, Proj, SkipLocalWriteErrorP, S) -> + %% Write to local public projection store first, and if it succeeds, + %% then write to all remote public projection stores. + cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, S). + +cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, + #ch_mgr{myflu=MyFLU}=S) -> + {_UpNodes, Partitions, S2} = calc_up_nodes(S), + Res0 = perhaps_call_t( + S, Partitions, MyFLU, + fun() -> machi_flu0:proj_write(MyFLU, Epoch, public, Proj) end), + Continue = fun() -> + FLUs = Proj#projection_v1.all_members -- [MyFLU], + cl_write_public_proj_remote(FLUs, Partitions, Epoch, Proj, S) + end, + case Res0 of + ok -> + {XX, SS} = Continue(), + {{local_write_result, ok, XX}, SS}; + Else when SkipLocalWriteErrorP -> + {XX, SS} = Continue(), + {{local_write_result, Else, XX}, SS}; + Else when Else == error_written; Else == timeout; Else == t_timeout -> + {Else, S2} + end. + +cl_write_public_proj_remote(FLUs, Partitions, Epoch, Proj, S) -> + %% We're going to be very care-free about this write because we'll rely + %% on the read side to do any read repair. + DoIt = fun(X) -> machi_flu0:proj_write(X, Epoch, public, Proj) end, + Rs = [{FLU, perhaps_call_t(S, Partitions, FLU, fun() -> DoIt(FLU) end)} || + FLU <- FLUs], + {{remote_write_results, Rs}, S}. + +do_cl_read_latest_public_projection(ReadRepairP, + #ch_mgr{proj=Proj1, myflu=_MyFLU} = S) -> + _Epoch1 = Proj1#projection_v1.epoch_number, + case cl_read_latest_projection(public, S) of + {needs_repair, FLUsRs, Extra, S3} -> + if not ReadRepairP -> + {not_unanimous, todoxyz, [{results, FLUsRs}|Extra], S3}; + true -> + {_Status, S4} = do_read_repair(FLUsRs, Extra, S3), + do_cl_read_latest_public_projection(ReadRepairP, S4) + end; + {UnanimousTag, Proj2, Extra, S3}=_Else -> + {UnanimousTag, Proj2, Extra, S3} + end. + +read_latest_projection_call_only(ProjectionType, AllHosed, + #ch_mgr{proj=CurrentProj}=S) -> + #projection_v1{all_members=All_list} = CurrentProj, + All_queried_list = All_list -- AllHosed, + + {_UpNodes, Partitions, S2} = calc_up_nodes(S), + DoIt = fun(X) -> + case machi_flu0:proj_read_latest(X, ProjectionType) of + {ok, P} -> P; + Else -> Else + end + end, + Rs = [perhaps_call_t(S, Partitions, FLU, fun() -> DoIt(FLU) end) || + FLU <- All_queried_list], + FLUsRs = lists:zip(All_queried_list, Rs), + {All_queried_list, FLUsRs, S2}. + +cl_read_latest_projection(ProjectionType, S) -> + AllHosed = [], + cl_read_latest_projection(ProjectionType, AllHosed, S). + +cl_read_latest_projection(ProjectionType, AllHosed, S) -> + {All_queried_list, FLUsRs, S2} = + read_latest_projection_call_only(ProjectionType, AllHosed, S), + + rank_and_sort_projections_with_extra(All_queried_list, FLUsRs, S2). 
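cl_read_latest_projection/3 and rank_and_sort_projections_with_extra/3 boil down to: ask every non-hosed FLU for its latest public projection, then classify the replies. A stripped-down sketch of that classification, where BestFun stands in for rank_and_sort_projections/2 and timeouts or other bad answers are ignored for brevity:

    %% FLUsRs is [{FLU, error_unwritten | {proj, P}}] in this sketch.
    classify(FLUsRs, BestFun) ->
        Unwritten = [x || {_FLU, error_unwritten} <- FLUsRs],
        Ps        = [P || {_FLU, {proj, P}} <- FLUsRs],
        if length(Unwritten) == length(FLUsRs) ->
                error_unwritten;              % nobody has written this epoch yet
           Unwritten /= [] ->
                needs_repair;                 % spread the best projection around
           true ->
                Best = BestFun(Ps),
                case [P || P <- Ps, P /= Best] of
                    [] -> {unanimous, Best};
                    _  -> {not_unanimous, Best}
                end
        end.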
+ +rank_and_sort_projections_with_extra(All_queried_list, FLUsRs, + #ch_mgr{proj=CurrentProj}=S) -> + UnwrittenRs = [x || {_, error_unwritten} <- FLUsRs], + Ps = [Proj || {_FLU, Proj} <- FLUsRs, is_record(Proj, projection_v1)], + BadAnswerFLUs = [FLU || {FLU, Answer} <- FLUsRs, + not is_record(Answer, projection_v1)], + + if All_queried_list == [] + orelse + length(UnwrittenRs) == length(FLUsRs) -> + {error_unwritten, FLUsRs, [todo_fix_caller_perhaps], S}; + UnwrittenRs /= [] -> + {needs_repair, FLUsRs, [flarfus], S}; + true -> + [{_Rank, BestProj}|_] = rank_and_sort_projections(Ps, CurrentProj), + NotBestPs = [Proj || Proj <- Ps, Proj /= BestProj], + UnanimousTag = if NotBestPs == [] -> unanimous; + true -> not_unanimous + end, + Extra = [{all_members_replied, length(FLUsRs) == length(All_queried_list)}], + Best_FLUs = [FLU || {FLU, Projx} <- FLUsRs, Projx == BestProj], + TransAllHosed = lists:usort( + lists:flatten([get_all_hosed(P) || P <- Ps])), + AllFlapCounts = merge_flap_counts([get_all_flap_counts(P) || + P <- Ps]), + Extra2 = [{all_queried_list, All_queried_list}, + {flus_rs, FLUsRs}, + {unanimous_flus,Best_FLUs}, + {not_unanimous_flus, All_queried_list -- + (Best_FLUs ++ BadAnswerFLUs)}, + {bad_answer_flus, BadAnswerFLUs}, + {not_unanimous_answers, NotBestPs}, + {trans_all_hosed, TransAllHosed}, + {trans_all_flap_counts, AllFlapCounts}|Extra], + {UnanimousTag, BestProj, Extra2, S} + end. + +do_read_repair(FLUsRs, _Extra, #ch_mgr{proj=CurrentProj} = S) -> + Unwrittens = [x || {_FLU, error_unwritten} <- FLUsRs], + Ps = [Proj || {_FLU, Proj} <- FLUsRs, is_record(Proj, projection_v1)], + if Unwrittens == [] orelse Ps == [] -> + {nothing_to_do, S}; + true -> + %% We have at least one unwritten and also at least one proj. + %% Pick the best one, then spam it everywhere. + + [{_Rank, BestProj}|_] = rank_and_sort_projections(Ps, CurrentProj), + Epoch = BestProj#projection_v1.epoch_number, + + %% We're doing repair, so use the flavor that will + %% continue to all others even if there is an + %% error_written on the local FLU. + {_DontCare, _S2}=Res = cl_write_public_proj_skip_local_error( + Epoch, BestProj, S), + Res + end. + +make_initial_projection(MyName, All_list, UPI_list, Repairing_list, Ps) -> + make_projection(0, MyName, All_list, [], UPI_list, Repairing_list, Ps). + +make_projection(EpochNum, + MyName, All_list, Down_list, UPI_list, Repairing_list, + Dbg) -> + make_projection(EpochNum, + MyName, All_list, Down_list, UPI_list, Repairing_list, + Dbg, []). + +make_projection(EpochNum, + MyName, All_list, Down_list, UPI_list, Repairing_list, + Dbg, Dbg2) -> + P = #projection_v1{epoch_number=EpochNum, + epoch_csum= <<>>, % always checksums as <<>> + creation_time=now(), + author_server=MyName, + all_members=All_list, + down=Down_list, + upi=UPI_list, + repairing=Repairing_list, + dbg=Dbg, + dbg2=[] % always checksums as [] + }, + P2 = update_projection_checksum(P), + P2#projection_v1{dbg2=Dbg2}. + +update_projection_checksum(#projection_v1{dbg2=Dbg2} = P) -> + CSum = crypto:hash(sha, term_to_binary(P#projection_v1{dbg2=[]})), + P#projection_v1{epoch_csum=CSum, dbg2=Dbg2}. + +update_projection_dbg2(P, Dbg2) when is_list(Dbg2) -> + P#projection_v1{dbg2=Dbg2}. + +calc_projection(S, RelativeToServer) -> + calc_projection(S, RelativeToServer, []). 
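update_projection_checksum/1 above hashes the projection with dbg2 reset to [], so annotations attached via update_projection_dbg2/2 never perturb the checksum. The same convention in miniature, using a hypothetical record:

    -record(p, {epoch, upi=[], csum= <<>>, dbg2=[]}).

    seal(#p{}=P) ->
        P#p{csum=crypto:hash(sha, term_to_binary(P#p{csum= <<>>, dbg2=[]}))}.

    verify(#p{csum=CSum}=P) ->
        CSum =:= crypto:hash(sha, term_to_binary(P#p{csum= <<>>, dbg2=[]})).

verify/1 keeps returning true after dbg2 is rewritten, which is why react_to_env_C110 can attach extra debugging information via update_projection_dbg2/2 just before writing the private copy.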
+ +calc_projection(#ch_mgr{proj=LastProj, runenv=RunEnv} = S, + RelativeToServer, AllHosed) -> + Dbg = [], + OldThreshold = proplists:get_value(old_threshold, RunEnv), + NoPartitionThreshold = proplists:get_value(no_partition_threshold, RunEnv), + calc_projection(OldThreshold, NoPartitionThreshold, LastProj, + RelativeToServer, AllHosed, Dbg, S). + +%% OldThreshold: Percent chance of using the old/previous network partition list +%% NoPartitionThreshold: If the network partition changes, what percent chance +%% that there are no partitions at all? + +calc_projection(_OldThreshold, _NoPartitionThreshold, LastProj, + RelativeToServer, AllHosed, Dbg, + #ch_mgr{name=MyName, runenv=RunEnv1}=S) -> + #projection_v1{epoch_number=OldEpochNum, + all_members=All_list, + upi=OldUPI_list, + repairing=OldRepairing_list + } = LastProj, + LastUp = lists:usort(OldUPI_list ++ OldRepairing_list), + AllMembers = (S#ch_mgr.proj)#projection_v1.all_members, + {Up0, Partitions, RunEnv2} = calc_up_nodes(MyName, + AllMembers, RunEnv1), + Up = Up0 -- AllHosed, + + NewUp = Up -- LastUp, + Down = AllMembers -- Up, + + NewUPI_list = [X || X <- OldUPI_list, lists:member(X, Up)], + Repairing_list2 = [X || X <- OldRepairing_list, lists:member(X, Up)], + {NewUPI_list3, Repairing_list3, RunEnv3} = + case {NewUp, Repairing_list2} of + {[], []} -> +D_foo=[], + {NewUPI_list, [], RunEnv2}; + {[], [H|T]} when RelativeToServer == hd(NewUPI_list) -> + %% The author is head of the UPI list. Let's see if + %% *everyone* in the UPI+repairing lists are using our + %% projection. This is to simulate a requirement that repair + %% a real repair process cannot take place until the chain is + %% stable, i.e. everyone is in the same epoch. + + %% TODO create a real API call for fetching this info. + SameEpoch_p = check_latest_private_projections( + tl(NewUPI_list) ++ Repairing_list2, + S#ch_mgr.proj, Partitions, S), + if not SameEpoch_p -> +D_foo=[], + {NewUPI_list, OldRepairing_list, RunEnv2}; + true -> +D_foo=[{repair_airquote_done, {we_agree, (S#ch_mgr.proj)#projection_v1.epoch_number}}], + {NewUPI_list ++ [H], T, RunEnv2} + end; + {_, _} -> +D_foo=[], + {NewUPI_list, OldRepairing_list, RunEnv2} + end, + Repairing_list4 = case NewUp of + [] -> Repairing_list3; + NewUp -> Repairing_list3 ++ NewUp + end, + Repairing_list5 = Repairing_list4 -- Down, + + TentativeUPI = NewUPI_list3, + TentativeRepairing = Repairing_list5, + + {NewUPI, NewRepairing} = + if TentativeUPI == [] andalso TentativeRepairing /= [] -> + [FirstRepairing|TailRepairing] = TentativeRepairing, + {[FirstRepairing], TailRepairing}; + true -> + {TentativeUPI, TentativeRepairing} + end, + + P = make_projection(OldEpochNum + 1, + MyName, All_list, Down, NewUPI, NewRepairing, + D_foo ++ + Dbg ++ [{ps, Partitions},{nodes_up, Up}]), + {P, S#ch_mgr{runenv=RunEnv3}}. + +check_latest_private_projections(FLUs, MyProj, Partitions, S) -> + FoldFun = fun(_FLU, false) -> + false; + (FLU, true) -> + F = fun() -> + machi_flu0:proj_read_latest(FLU, private) + end, + case perhaps_call_t(S, Partitions, FLU, F) of + {ok, RemotePrivateProj} -> + %% TODO: For use inside the simulator, this + %% function needs to check if RemotePrivateProj + %% contains a nested inner projection and, if + %% so, compare epoch# and upi & repairing lists. + %% If the nested inner proj is not checked here, + %% then a FLU in asymmetric partition flapping + %% case will appear in the simulator to be stuck + %% in repairing state. 
+ if MyProj#projection_v1.epoch_number == + RemotePrivateProj#projection_v1.epoch_number + andalso + MyProj#projection_v1.epoch_csum == + RemotePrivateProj#projection_v1.epoch_csum -> + true; + true -> + false + end; + _ -> + false + end + end, + lists:foldl(FoldFun, true, FLUs). + +calc_up_nodes(#ch_mgr{name=MyName, proj=Proj, runenv=RunEnv1}=S) -> + AllMembers = Proj#projection_v1.all_members, + {UpNodes, Partitions, RunEnv2} = + calc_up_nodes(MyName, AllMembers, RunEnv1), + {UpNodes, Partitions, S#ch_mgr{runenv=RunEnv2}}. + +calc_up_nodes(MyName, AllMembers, RunEnv1) -> + {Partitions2, Islands2} = machi_partition_simulator:get(AllMembers), + catch ?REACT({partitions,Partitions2}), + catch ?REACT({islands,Islands2}), + UpNodes = lists:sort( + [Node || Node <- AllMembers, + not lists:member({MyName, Node}, Partitions2), + not lists:member({Node, MyName}, Partitions2)]), + RunEnv2 = replace(RunEnv1, + [{network_partitions, Partitions2}, + {network_islands, Islands2}, + {up_nodes, UpNodes}]), + {UpNodes, Partitions2, RunEnv2}. + +replace(PropList, Items) -> + proplists:compact(Items ++ PropList). + +make_projection_summary(#projection_v1{epoch_number=EpochNum, + all_members=_All_list, + down=Down_list, + author_server=Author, + upi=UPI_list, + repairing=Repairing_list, + dbg=Dbg, dbg2=Dbg2}) -> + [{epoch,EpochNum},{author,Author}, + {upi,UPI_list},{repair,Repairing_list},{down,Down_list}, + {d,Dbg}, {d2,Dbg2}]. + +rank_and_sort_projections(Ps, CurrentProj) -> + Epoch = lists:max([Proj#projection_v1.epoch_number || Proj <- Ps]), + MaxPs = [Proj || Proj <- Ps, + Proj#projection_v1.epoch_number == Epoch], + %% Sort with highest rank first (custom sort) + lists:sort(fun({RankA,_}, {RankB,_}) -> RankA > RankB end, + rank_projections(MaxPs, CurrentProj)). + +%% Caller must ensure all Projs are of the same epoch number. +%% If the caller gives us projections with different epochs, we assume +%% that the caller is doing an OK thing. + +rank_projections(Projs, CurrentProj) -> + #projection_v1{all_members=All_list} = CurrentProj, + MemberRank = orddict:from_list( + lists:zip(All_list, lists:seq(1, length(All_list)))), + N = length(All_list), + [{rank_projection(Proj, MemberRank, N), Proj} || Proj <- Projs]. + +rank_projection(#projection_v1{upi=[]}, _MemberRank, _N) -> + -100; +rank_projection(#projection_v1{author_server=Author, + upi=UPI_list, + repairing=Repairing_list}, MemberRank, N) -> + AuthorRank = orddict:fetch(Author, MemberRank), + %% (AuthorRank-AuthorRank) + % feels unstable???? + AuthorRank + % feels stable + ( N * length(Repairing_list)) + + (N*N * length(UPI_list)). + +do_react_to_env(S) -> + put(react, []), + react_to_env_A10(S). + +react_to_env_A10(S) -> + ?REACT(a10), + react_to_env_A20(0, S). + +react_to_env_A20(Retries, S) -> + ?REACT(a20), + {UnanimousTag, P_latest, ReadExtra, S2} = + do_cl_read_latest_public_projection(true, S), + + %% The UnanimousTag isn't quite sufficient for our needs. We need + %% to determine if *all* of the UPI+Repairing FLUs are members of + %% the unanimous server replies. + UnanimousFLUs = lists:sort(proplists:get_value(unanimous_flus, ReadExtra)), + UPI_Repairing_FLUs = lists:sort(P_latest#projection_v1.upi ++ + P_latest#projection_v1.repairing), + All_UPI_Repairing_were_unanimous = UPI_Repairing_FLUs == UnanimousFLUs, + %% TODO: investigate if the condition below is more correct? 
+ %% All_UPI_Repairing_were_unanimous = (UPI_Repairing_FLUs -- UnanimousFLUs) == [], + LatestUnanimousP = + if UnanimousTag == unanimous + andalso + All_UPI_Repairing_were_unanimous -> + ?REACT({a20,?LINE}), + true; + UnanimousTag == unanimous -> + ?REACT({a20,?LINE,[{upi_repairing,UPI_Repairing_FLUs}, + {unanimous,UnanimousFLUs}]}), + false; + UnanimousTag == not_unanimous -> + ?REACT({a20,?LINE}), + false; + true -> + exit({badbad, UnanimousTag}) + end, + react_to_env_A30(Retries, P_latest, LatestUnanimousP, ReadExtra, S2). + +react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, + #ch_mgr{name=MyName, proj=P_current, + flap_limit=FlapLimit} = S) -> + ?REACT(a30), + RelativeToServer = MyName, + {P_newprop1, S2} = calc_projection(S, RelativeToServer), + ?REACT({a30, ?LINE, [{newprop1, make_projection_summary(P_newprop1)}]}), + + %% Are we flapping yet? + {P_newprop2, S3} = calculate_flaps(P_newprop1, P_current, FlapLimit, S2), + + %% Move the epoch number up ... originally done in C300. + #projection_v1{epoch_number=Epoch_newprop2}=P_newprop2, + #projection_v1{epoch_number=Epoch_latest}=P_latest, + NewEpoch = erlang:max(Epoch_newprop2, Epoch_latest) + 1, + P_newprop3 = P_newprop2#projection_v1{epoch_number=NewEpoch}, + ?REACT({a30, ?LINE, [{newprop3, make_projection_summary(P_newprop3)}]}), + + {P_newprop10, S10} = + case get_flap_count(P_newprop3) of + %% TODO: refactor to eliminate cut-and-paste code in 'when' + {_, P_newprop3_flap_count} when P_newprop3_flap_count >= FlapLimit -> + AllHosed = get_all_hosed(S3), + {P_i, S_i} = calc_projection(S3, MyName, AllHosed), + P_inner = case lists:member(MyName, AllHosed) of + false -> + P_i; + true -> + P_i#projection_v1{upi=[MyName], + repairing=[], + down=P_i#projection_v1.all_members + -- [MyName]} + end, + + %% TODO FIXME A naive assignment here will cause epoch # + %% instability of the inner projection. We need a stable + %% epoch number somehow. ^_^ + %% P_inner2 = P_inner#projection_v1{epoch_number=P_newprop3#projection_v1.epoch_number}, + + FinalInnerEpoch = + case proplists:get_value(inner_projection, + P_current#projection_v1.dbg) of + undefined -> + AllFlapCounts_epk = + [Epk || {{Epk,_FlTime}, _FlCount} <- + get_all_flap_counts(P_newprop3)], + case AllFlapCounts_epk of + [] -> + P_newprop3#projection_v1.epoch_number; + [_|_] -> + lists:max(AllFlapCounts_epk) + end; + P_oldinner -> + if P_oldinner#projection_v1.upi == P_inner#projection_v1.upi + andalso + P_oldinner#projection_v1.repairing == P_inner#projection_v1.repairing + andalso + P_oldinner#projection_v1.down == P_inner#projection_v1.down -> + P_oldinner#projection_v1.epoch_number; + true -> + P_oldinner#projection_v1.epoch_number + 1 + end + end, + + P_inner2 = P_inner#projection_v1{epoch_number=FinalInnerEpoch}, + InnerInfo = [{inner_summary, make_projection_summary(P_inner2)}, + {inner_projection, P_inner2}], + DbgX = replace(P_newprop3#projection_v1.dbg, InnerInfo), + ?REACT({a30, ?LINE, [qqqwww|DbgX]}), + {P_newprop3#projection_v1{dbg=DbgX}, S_i}; + _ -> + {P_newprop3, S3} + end, + + react_to_env_A40(Retries, P_newprop10, P_latest, + LatestUnanimousP, S10). 
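Two epoch rules are at work in A30: the outer proposal always jumps past both our own epoch and the latest epoch we have read, while the inner (flapping) projection keeps its epoch stable unless its membership actually changes. Condensed, with the inner projection reduced to a {UPI, Repairing, Down} triple:

    %% Outer rule: strictly larger than anything we have proposed or read.
    next_outer_epoch(MyEpoch, LatestSeenEpoch) ->
        erlang:max(MyEpoch, LatestSeenEpoch) + 1.

    %% Inner rule: only bump when {UPI, Repairing, Down} changes, so that
    %% flapping peers can converge on a stable inner epoch number.
    next_inner_epoch(OldTriple, OldEpoch, NewTriple) when OldTriple == NewTriple ->
        OldEpoch;
    next_inner_epoch(_OldTriple, OldEpoch, _NewTriple) ->
        OldEpoch + 1.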
+ +react_to_env_A40(Retries, P_newprop, P_latest, LatestUnanimousP, + #ch_mgr{name=MyName, proj=P_current}=S) -> + ?REACT(a40), + [{Rank_newprop, _}] = rank_projections([P_newprop], P_current), + [{Rank_latest, _}] = rank_projections([P_latest], P_current), + LatestAuthorDownP = lists:member(P_latest#projection_v1.author_server, + P_newprop#projection_v1.down), + + if + P_latest#projection_v1.epoch_number > P_current#projection_v1.epoch_number + orelse + not LatestUnanimousP -> + ?REACT({a40, ?LINE, + [{latest_epoch, P_latest#projection_v1.epoch_number}, + {current_epoch, P_current#projection_v1.epoch_number}, + {latest_unanimous_p, LatestUnanimousP}]}), + + %% 1st clause: someone else has written a newer projection + %% 2nd clause: a network partition has healed, revealing a + %% differing opinion. + react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, + Rank_newprop, Rank_latest, S); + + P_latest#projection_v1.epoch_number < P_current#projection_v1.epoch_number + orelse + P_latest /= P_current -> + ?REACT({a40, ?LINE, + [{latest_epoch, P_latest#projection_v1.epoch_number}, + {current_epoch, P_current#projection_v1.epoch_number}, + {neq, P_latest /= P_current}]}), + + %% Both of these cases are rare. Elsewhere, the code + %% assumes that the local FLU's projection store is always + %% available, so reads & writes to it aren't going to fail + %% willy-nilly. If that assumption is true, then we can + %% reason as follows: + %% + %% a. If we can always read from the local FLU projection + %% store, then the 1st clause isn't possible because + %% P_latest's epoch # must be at least as large as + %% P_current's epoch # + %% + %% b. If P_latest /= P_current, then there can't be a + %% unanimous reply for P_latest, so the earlier 'if' + %% clause would be triggered and so we could never reach + %% this clause. + %% + %% I'm keeping this 'if' clause just in case the local FLU + %% projection store assumption changes. + react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, + Rank_newprop, Rank_latest, S); + + %% A40a (see flowchart) + Rank_newprop > Rank_latest -> + ?REACT({b10, ?LINE, + [{rank_latest, Rank_latest}, + {rank_newprop, Rank_newprop}, + {latest_author, P_latest#projection_v1.author_server}]}), + + %% TODO: There may be an "improvement" here. If we're the + %% highest-ranking FLU in the all_members list, then if we make a + %% projection where our UPI list is the same as P_latest's, and + %% our repairing list is the same as P_latest's, then it may not + %% be necessary to write our projection: it doesn't "improve" + %% anything UPI-wise or repairing-wise. But it isn't clear to me + %% if it's 100% correct to "improve" here and skip writing + %% P_newprop, yet. 
+ react_to_env_C300(P_newprop, P_latest, S); + + %% A40b (see flowchart) + P_latest#projection_v1.author_server == MyName + andalso + (P_newprop#projection_v1.upi /= P_latest#projection_v1.upi + orelse + P_newprop#projection_v1.repairing /= P_latest#projection_v1.repairing) -> + ?REACT({a40, ?LINE, + [{latest_author, P_latest#projection_v1.author_server}, + {newprop_upi, P_newprop#projection_v1.upi}, + {latest_upi, P_latest#projection_v1.upi}, + {newprop_repairing, P_newprop#projection_v1.repairing}, + {latest_repairing, P_latest#projection_v1.repairing}]}), + + react_to_env_C300(P_newprop, P_latest, S); + + %% A40c (see flowchart) + LatestAuthorDownP -> + ?REACT({a40, ?LINE, + [{latest_author, P_latest#projection_v1.author_server}, + {author_is_down_p, LatestAuthorDownP}]}), + + %% TODO: I believe that membership in the + %% P_newprop#projection_v1.down is not sufficient for long + %% chains. Rather, we ought to be using a full broadcast + %% gossip of server up status. + %% + %% Imagine 5 servers in an "Olympic Rings" style + %% overlapping network paritition, where ring1 = upper + %% leftmost and ring5 = upper rightmost. It's both + %% possible and desirable for ring5's projection to be + %% seen (public) by ring1. Ring5's projection's rank is + %% definitely higher than ring1's proposed projection's + %% rank ... but we're in a crazy netsplit where: + %% * if we accept ring5's proj: only one functioning chain + %% ([ring4,ring5] but stable + %% * if we accept ring1's proj: two functioning chains + %% ([ring1,ring2] and [ring4,ring5] indepependently) + %% but unstable: we're probably going to flap back & forth?! + react_to_env_C300(P_newprop, P_latest, S); + + true -> + ?REACT({a40, ?LINE, [true]}), + + react_to_env_A50(P_latest, S) + end. + +react_to_env_A50(P_latest, S) -> + ?REACT(a50), + + HH = get(react), + io:format(user, "HEE50s ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse([X || X <- HH, is_atom(X)])]), + %% io:format(user, "HEE50 ~w ~w ~p\n", [S#ch_mgr.name, self(), lists:reverse(HH)]), + + ?REACT({a50, ?LINE, [{latest_epoch, P_latest#projection_v1.epoch_number}]}), + {{no_change, P_latest#projection_v1.epoch_number}, S}. + +react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, + Rank_newprop, Rank_latest, + #ch_mgr{name=MyName, flap_limit=FlapLimit}=S) -> + ?REACT(b10), + + {_P_newprop_flap_time, P_newprop_flap_count} = get_flap_count(P_newprop), + LatestAllFlapCounts = get_all_flap_counts_counts(P_latest), + P_latest_trans_flap_count = my_find_minmost(LatestAllFlapCounts), + + if + LatestUnanimousP -> + ?REACT({b10, ?LINE, [{latest_unanimous_p, LatestUnanimousP}]}), + put(b10_hack, false), + + react_to_env_C100(P_newprop, P_latest, S); + + P_newprop_flap_count >= FlapLimit -> + %% I am flapping ... what else do I do? + ?REACT({b10, ?LINE, [i_am_flapping, + {newprop_flap_count, P_newprop_flap_count}, + {latest_trans_flap_count, P_latest_trans_flap_count}, + {flap_limit, FlapLimit}]}), + _B10Hack = get(b10_hack), + %% if _B10Hack == false andalso P_newprop_flap_count - FlapLimit - 3 =< 0 -> io:format(user, "{FLAP: ~w flaps ~w}!\n", [S#ch_mgr.name, P_newprop_flap_count]), put(b10_hack, true); true -> ok end, + io:format(user, "{FLAP: ~w flaps ~w}!\n", [S#ch_mgr.name, P_newprop_flap_count]), + + if + %% So, if we noticed a flap count by some FLU X with a + %% count below FlapLimit, then X crashes so that X's + %% flap count remains below FlapLimit, then we could get + %% stuck forever? 
Hrm, except that 'crashes' ought to be + %% detected by our own failure detector and get us out of + %% this current flapping situation, right? TODO + %% + %% 2015-04-05: If we add 'orelse AllSettled' to this 'if' + %% clause, then we can end up short-circuiting too + %% early. (Where AllSettled comes from the runenv's + %% flapping_i prop.) So, I believe that we need to + %% rely on the failure detector to rescue us. + %% + %% TODO About the above ^^ I think that was based on buggy + %% calculation of AllSettled. Recheck! + %% + %% TODO Yay, another magic constant below, added to + %% FlapLimit, that needs thorough examination and + %% hopefully elimination. I'm adding it to try to + %% make it more likely that someone's private proj + %% will include all_flap_counts_settled,true 100% + %% of the time. But I'm not sure how important that + %% really is. + %% That settled flag can lag behind after a change in + %% network conditions, so I'm not sure how big its + %% value is, if any. +% QQQ TODO +% P_latest_trans_flap_count >= FlapLimit + 20 -> +% %% Everyone that's flapping together now has flap_count +% %% that's larger than the limit. So it's safe and good +% %% to stop here, so we can break the cycle of flapping. +% ?REACT({b10, ?LINE, [flap_stop]}), +% react_to_env_A50(P_latest, S); + + true -> + %% It is our moral imperative to write so that the flap + %% cycle continues enough times so that everyone notices + %% and thus the earlier clause above fires. + ?REACT({b10, ?LINE, [flap_continue]}), + react_to_env_C300(P_newprop, P_latest, S) + end; + + Retries > 2 -> + ?REACT({b10, ?LINE, [{retries, Retries}]}), + put(b10_hack, false), + + %% The author of P_latest is too slow or crashed. + %% Let's try to write P_newprop and see what happens! + react_to_env_C300(P_newprop, P_latest, S); + + Rank_latest >= Rank_newprop + andalso + P_latest#projection_v1.author_server /= MyName -> + ?REACT({b10, ?LINE, + [{rank_latest, Rank_latest}, + {rank_newprop, Rank_newprop}, + {latest_author, P_latest#projection_v1.author_server}]}), + put(b10_hack, false), + + %% Give the author of P_latest an opportunite to write a + %% new projection in a new epoch to resolve this mixed + %% opinion. + react_to_env_C200(Retries, P_latest, S); + + true -> + ?REACT({b10, ?LINE}), + put(b10_hack, false), + + %% P_newprop is best, so let's write it. + react_to_env_C300(P_newprop, P_latest, S) + end. + +react_to_env_C100(P_newprop, P_latest, + #ch_mgr{name=MyName, proj=P_current}=S) -> + ?REACT(c100), + I_am_UPI_in_newprop_p = lists:member(MyName, P_newprop#projection_v1.upi), + I_am_Repairing_in_latest_p = lists:member(MyName, + P_latest#projection_v1.repairing), + ShortCircuit_p = + P_latest#projection_v1.epoch_number > P_current#projection_v1.epoch_number + andalso + I_am_UPI_in_newprop_p + andalso + I_am_Repairing_in_latest_p, + + case {ShortCircuit_p, projection_transition_is_sane(P_current, P_latest, + MyName)} of + {true, _} -> + %% Someone else believes that I am repairing. We assume + %% that nobody is being Byzantine, so we'll believe that I + %% am/should be repairing. We ignore our proposal and try + %% to go with the latest. + ?REACT({c100, ?LINE, [repairing_short_circuit]}), + react_to_env_C110(P_latest, S); + {_, true} -> + ?REACT({c100, ?LINE, [sane]}), + react_to_env_C110(P_latest, S); + {_, _AnyOtherReturnValue} -> + %% P_latest is not sane. + %% By process of elimination, P_newprop is best, + %% so let's write it. + ?REACT({c100, ?LINE, [not_sane]}), + react_to_env_C300(P_newprop, P_latest, S) + end. 
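The case expression closing C100 reduces to a small decision table: believe a latest projection that says we are repairing, even if our own proposal disagrees; otherwise adopt the latest only when the transition from the current projection is sane; and fall back to writing our own proposal when it is not. As a pure function sketch:

    %% ShortCircuitP: latest is newer, we are UPI in our proposal but
    %% repairing in the latest.  SaneP: projection_transition_is_sane/3 result.
    c100_choice(true, _SaneP)   -> adopt_latest;   % trust the repair claim, go to C110
    c100_choice(_,    true)     -> adopt_latest;   % sane transition, go to C110
    c100_choice(_,    _NotSane) -> write_own.      % latest not sane, go to C300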
+ +react_to_env_C110(P_latest, #ch_mgr{myflu=MyFLU} = S) -> + ?REACT(c110), + %% TOOD: Should we carry along any extra info that that would be useful + %% in the dbg2 list? + Extra_todo = [], + RunEnv = S#ch_mgr.runenv, + Islands = proplists:get_value(network_islands, RunEnv), + P_latest2 = update_projection_dbg2( + P_latest, + [%% {network_islands, Islands}, + %% {hooray, {v2, date(), time()}} + Islands--Islands + |Extra_todo]), + + Epoch = P_latest2#projection_v1.epoch_number, + ok = machi_flu0:proj_write(MyFLU, Epoch, private, P_latest2), + case proplists:get_value(private_write_verbose, S#ch_mgr.opts) of + true -> + {_,_,C} = os:timestamp(), + MSec = trunc(C / 1000), + {HH,MM,SS} = time(), + io:format(user, "\n~2..0w:~2..0w:~2..0w.~3..0w ~p uses: ~w\n", + [HH,MM,SS,MSec, S#ch_mgr.name, + make_projection_summary(P_latest2)]); + _ -> + ok + end, + react_to_env_C120(P_latest, S). + +react_to_env_C120(P_latest, #ch_mgr{proj_history=H} = S) -> + ?REACT(c120), + H2 = queue:in(P_latest, H), + H3 = case queue:len(H2) of + %% TODO: revisit this constant? Is this too long as a base? + %% My hunch is that it's fine and that the flap_limit needs to + %% be raised much higher (because it can increase several ticks + %% without a newer public epoch proposed anywhere). + X when X > length(P_latest#projection_v1.all_members) * 2 -> + {_V, Hxx} = queue:out(H2), + Hxx; + _ -> + H2 + end, + + HH = get(react), + io:format(user, "HEE120s ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse([X || X <- HH, is_atom(X)])]), + %% io:format(user, "HEE120 ~w ~w ~p\n", [S#ch_mgr.name, self(), lists:reverse(HH)]), + + ?REACT({c120, [{latest, make_projection_summary(P_latest)}]}), + {{now_using, P_latest#projection_v1.epoch_number}, + S#ch_mgr{proj=P_latest, proj_history=H3, proj_proposed=none}}. + +react_to_env_C200(Retries, P_latest, S) -> + ?REACT(c200), + try + %% TODO: This code works "well enough" without actually + %% telling anybody anything. Do we want to rip this out? + %% Actually implement it? None of the above? + yo:tell_author_yo(P_latest#projection_v1.author_server) + catch _Type:_Err -> + %% io:format(user, "TODO: tell_author_yo is broken: ~p ~p\n", + %% [_Type, _Err]), + ok + end, + react_to_env_C210(Retries, S). + +react_to_env_C210(Retries, #ch_mgr{name=MyName, proj=Proj} = S) -> + ?REACT(c210), + sleep_ranked_order(10, 100, MyName, Proj#projection_v1.all_members), + react_to_env_C220(Retries, S). + +react_to_env_C220(Retries, S) -> + ?REACT(c220), + react_to_env_A20(Retries + 1, S). + +react_to_env_C300(#projection_v1{epoch_number=_Epoch_newprop}=P_newprop, + #projection_v1{epoch_number=_Epoch_latest}=_P_latest, S) -> + ?REACT(c300), + + %% This logic moved to A30. + %% NewEpoch = erlang:max(Epoch_newprop, Epoch_latest) + 1, + %% P_newprop2 = P_newprop#projection_v1{epoch_number=NewEpoch}, + %% react_to_env_C310(update_projection_checksum(P_newprop2), S). + + react_to_env_C310(update_projection_checksum(P_newprop), S). + +react_to_env_C310(P_newprop, S) -> + ?REACT(c310), + Epoch = P_newprop#projection_v1.epoch_number, + {WriteRes, S2} = cl_write_public_proj_skip_local_error(Epoch, P_newprop, S), + ?REACT({c310, ?LINE, + [{newprop, make_projection_summary(P_newprop)}, + {write_result, WriteRes}]}), + react_to_env_A10(S2). 
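react_to_env_C120 keeps a bounded queue of the projections this manager has actually adopted; calculate_flaps/4 later scans that history for repeated identical proposals. The trimming step, isolated (the bound used in the code above is 2 * length(all_members), and queue:new() is the starting value, as in init/1):

    push_bounded(Item, Q, MaxLen) ->
        Q2 = queue:in(Item, Q),
        case queue:len(Q2) > MaxLen of
            true  -> {_Dropped, Q3} = queue:out(Q2),   % drop the oldest entry
                     Q3;
            false -> Q2
        end.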
+ +calculate_flaps(P_newprop, _P_current, FlapLimit, + #ch_mgr{name=MyName, proj_history=H, flap_start=FlapStart, + flaps=Flaps, runenv=RunEnv0} = S) -> + RunEnv1 = replace(RunEnv0, [{flapping_i, []}]), + HistoryPs = queue:to_list(H), + Ps = HistoryPs ++ [P_newprop], + UniqueProposalSummaries = lists:usort([{P#projection_v1.upi, + P#projection_v1.repairing, + P#projection_v1.down} || P <- Ps]), + + {_WhateverUnanimous, BestP, Props, _S} = + cl_read_latest_projection(private, S), + NotBestPs = proplists:get_value(not_unanimous_answers, Props), + DownUnion = lists:usort( + lists:flatten( + [P#projection_v1.down || + P <- [BestP|NotBestPs]])), + HosedTransUnion = proplists:get_value(trans_all_hosed, Props), + TransFlapCounts0 = proplists:get_value(trans_all_flap_counts, Props), + + _Unanimous = proplists:get_value(unanimous_flus, Props), + _NotUnanimous = proplists:get_value(not_unanimous_flus, Props), + %% NOTE: bad_answer_flus are probably due to timeout or some other network + %% glitch, i.e., anything other than {ok, P::projection()} + %% response from machi_flu0:proj_read_latest(). + BadFLUs = proplists:get_value(bad_answer_flus, Props), + + RemoteTransFlapCounts1 = lists:keydelete(MyName, 1, TransFlapCounts0), + RemoteTransFlapCounts = + [X || {_FLU, {{_FlEpk,FlTime}, _FlapCount}}=X <- RemoteTransFlapCounts1, + FlTime /= ?NOT_FLAPPING], + TempNewFlaps = Flaps + 1, + TempAllFlapCounts = lists:sort([{MyName, {FlapStart, TempNewFlaps}}| + RemoteTransFlapCounts]), + %% Sanity check. + true = lists:all(fun({_,{_,_}}) -> true; + (_) -> false end, TempAllFlapCounts), + + %% H is the bounded history of all of this manager's private + %% projection store writes. If we've proposed the *same* + %% {UPI+Repairing, Down} combination for the entire length of our + %% bounded size of H, then we're flapping. + %% + %% If we're flapping, then we use our own flap counter and that of + %% all of our peer managers to see if we've all got flap counters + %% that exceed the flap_limit. If that global condition appears + %% true, then we "blow the circuit breaker" by stopping our + %% participation in the flapping store (via the shortcut to A50). + %% + %% We reset our flap counter on any of several conditions: + %% + %% 1. If our bounded history H contains more than one proposal, + %% then by definition we are not flapping. + %% 2. If a remote manager is flapping and has re-started a new + %% flapping episode. + %% 3. If one of the remote managers that we saw earlier has + %% stopped flapping. + + ?REACT({calculate_flaps, queue:len(H), UniqueProposalSummaries}), + case {queue:len(H), UniqueProposalSummaries} of + {N, [_]} when N >= length(P_newprop#projection_v1.all_members) -> + NewFlaps = TempNewFlaps, + if element(2,FlapStart) == ?NOT_FLAPPING -> + NewFlapStart = {{epk,P_newprop#projection_v1.epoch_number},now()}; + true -> + NewFlapStart = FlapStart + end, + + %% Wow, this behavior is almost spooky. + %% + %% For an example partition map [{c,a}], on the very first + %% time this 'if' clause is hit by FLU b, AllHosed=[a,c]. + %% How the heck does B know that?? + %% + %% If I use: + %% DownUnionQQQ = [{P#projection_v1.epoch_number, P#projection_v1.author_server, P#projection_v1.down} || P <- [BestP|NotBestPs]], + %% AllHosed = [x_1] ++ DownUnion ++ [x_2] ++ HosedTransUnion ++ [x_3] ++ BadFLUs ++ [{downunionqqq, DownUnionQQQ}]; + %% + %% ... 
then b sees this when proposing epoch 451: + %% + %% {all_hosed, + %% [x_1,a,c,x_2,x_3, + %% {downunionqqq, + %% [{450,a,[c]},{449,b,[]},{448,c,[a]},{441,d,[]}]}]}, + %% + %% So b's working on epoch 451 at the same time that d's latest + %% public projection is only epoch 441. But there's enough + %% lag so that b can "see" that a's bad=[c] (due to t_timeout!) + %% and c's bad=[a]. So voila, b magically knows about both + %% problem FLUs. Weird/cool. + + AllFlapCounts = TempAllFlapCounts, + AllHosed = lists:usort(DownUnion ++ HosedTransUnion ++ BadFLUs); + {_N, _} -> + NewFlaps = 0, + NewFlapStart = {{epk,-1},?NOT_FLAPPING}, + AllFlapCounts = [], + AllHosed = [] + end, + + %% If there's at least one count in AllFlapCounts that isn't my + %% flap count, and if it's over the flap limit, then consider them + %% settled. + AllFlapCountsSettled = lists:keydelete(MyName, 1, AllFlapCounts) /= [] + andalso + my_find_minmost(AllFlapCounts) >= FlapLimit, + FlappingI = {flapping_i, [{flap_count, {NewFlapStart, NewFlaps}}, + {all_hosed, AllHosed}, + {all_flap_counts, lists:sort(AllFlapCounts)}, + {all_flap_counts_settled, AllFlapCountsSettled}, + {bad,BadFLUs}, + {da_downu, DownUnion}, % debugging aid + {da_hosedtu, HosedTransUnion}, % debugging aid + {da_downreports, [{P#projection_v1.epoch_number, P#projection_v1.author_server, P#projection_v1.down} || P <- [BestP|NotBestPs]]} % debugging aid + ]}, + Dbg2 = [FlappingI|P_newprop#projection_v1.dbg], + %% SLF TODO: 2015-03-04: I'm growing increasingly suspicious of + %% the 'runenv' variable that's threaded through all this code. + %% It isn't doing what I'd originally intended. And I think that + %% the flapping information that we've just constructed here is + %% going to get lost, and that's a shame. Fix it. + RunEnv2 = replace(RunEnv1, [FlappingI]), + %% NOTE: If we'd increment of flaps here, that doesn't mean that + %% someone's public proj store has been updated. For example, + %% if we loop through states C2xx a few times, we would incr + %% flaps each time ... but the C2xx path doesn't write a new + %% proposal to everyone's public proj stores, and there's no + %% guarantee that anyone else as written a new public proj either. + {update_projection_checksum(P_newprop#projection_v1{dbg=Dbg2}), + S#ch_mgr{flaps=NewFlaps, flap_start=NewFlapStart, runenv=RunEnv2}}. + +projection_transitions_are_sane(Ps, RelativeToServer) -> + projection_transitions_are_sane(Ps, RelativeToServer, false). + +-ifdef(TEST). +projection_transitions_are_sane_retrospective(Ps, RelativeToServer) -> + projection_transitions_are_sane(Ps, RelativeToServer, true). +-endif. % TEST + +projection_transitions_are_sane([], _RelativeToServer, _RetrospectiveP) -> + true; +projection_transitions_are_sane([_], _RelativeToServer, _RetrospectiveP) -> + true; +projection_transitions_are_sane([P1, P2|T], RelativeToServer, RetrospectiveP) -> + case projection_transition_is_sane(P1, P2, RelativeToServer, + RetrospectiveP) of + true -> + projection_transitions_are_sane([P2|T], RelativeToServer, + RetrospectiveP); + Else -> + Else + end. + +projection_transition_is_sane(P1, P2, RelativeToServer) -> + projection_transition_is_sane(P1, P2, RelativeToServer, false). + +-ifdef(TEST). +projection_transition_is_sane_retrospective(P1, P2, RelativeToServer) -> + projection_transition_is_sane(P1, P2, RelativeToServer, true). +-endif. 
% TEST + +projection_transition_is_sane( + #projection_v1{epoch_number=Epoch1, + epoch_csum=CSum1, + creation_time=CreationTime1, + author_server=AuthorServer1, + all_members=All_list1, + down=Down_list1, + upi=UPI_list1, + repairing=Repairing_list1, + dbg=Dbg1} = P1, + #projection_v1{epoch_number=Epoch2, + epoch_csum=CSum2, + creation_time=CreationTime2, + author_server=AuthorServer2, + all_members=All_list2, + down=Down_list2, + upi=UPI_list2, + repairing=Repairing_list2, + dbg=Dbg2} = P2, + RelativeToServer, RetrospectiveP) -> + try + %% General notes: + %% + %% I'm making no attempt to be "efficient" here. All of these data + %% structures are small, and they're not called zillions of times per + %% second. + %% + %% The chain sequence/order checks at the bottom of this function aren't + %% as easy-to-read as they ought to be. However, I'm moderately confident + %% that it isn't buggy. TODO: refactor them for clarity. + + true = is_integer(Epoch1) andalso is_integer(Epoch2), + true = is_binary(CSum1) andalso is_binary(CSum2), + {_,_,_} = CreationTime1, + {_,_,_} = CreationTime2, + true = is_atom(AuthorServer1) andalso is_atom(AuthorServer2), % todo will probably change + true = is_list(All_list1) andalso is_list(All_list2), + true = is_list(Down_list1) andalso is_list(Down_list2), + true = is_list(UPI_list1) andalso is_list(UPI_list2), + true = is_list(Repairing_list1) andalso is_list(Repairing_list2), + true = is_list(Dbg1) andalso is_list(Dbg2), + + true = Epoch2 > Epoch1, + All_list1 = All_list2, % todo will probably change + + %% No duplicates + true = lists:sort(Down_list2) == lists:usort(Down_list2), + true = lists:sort(UPI_list2) == lists:usort(UPI_list2), + true = lists:sort(Repairing_list2) == lists:usort(Repairing_list2), + + %% Disjoint-ness + true = lists:sort(All_list2) == lists:sort(Down_list2 ++ UPI_list2 ++ + Repairing_list2), + [] = [X || X <- Down_list2, not lists:member(X, All_list2)], + [] = [X || X <- UPI_list2, not lists:member(X, All_list2)], + [] = [X || X <- Repairing_list2, not lists:member(X, All_list2)], + DownS2 = sets:from_list(Down_list2), + UPIS2 = sets:from_list(UPI_list2), + RepairingS2 = sets:from_list(Repairing_list2), + true = sets:is_disjoint(DownS2, UPIS2), + true = sets:is_disjoint(DownS2, RepairingS2), + true = sets:is_disjoint(UPIS2, RepairingS2), + + %% The author must not be down. + false = lists:member(AuthorServer1, Down_list1), + false = lists:member(AuthorServer2, Down_list2), + %% The author must be in either the UPI or repairing list. + true = lists:member(AuthorServer1, UPI_list1 ++ Repairing_list1), + true = lists:member(AuthorServer2, UPI_list2 ++ Repairing_list2), + + %% Additions to the UPI chain may only be at the tail + UPI_common_prefix = find_common_prefix(UPI_list1, UPI_list2), + if UPI_common_prefix == [] -> + if UPI_list1 == [] orelse UPI_list2 == [] -> + %% If the common prefix is empty, then one of the + %% inputs must be empty. + true; + true -> + %% Otherwise, we have a case of UPI changing from + %% one of these two situations: + %% + %% UPI_list1 -> UPI_list2 + %% ------------------------------------------------- + %% [d,c,b,a] -> [c,a] + %% [d,c,b,a] -> [c,a,repair_finished_added_to_tail]. 
+ NotUPI2 = (Down_list2 ++ Repairing_list2), + case lists:prefix(UPI_list1 -- NotUPI2, UPI_list2) of + true -> + true; + false -> + %% Here's a possible failure scenario: + %% UPI_list1 -> UPI_list2 + %% Repairing_list1 -> Repairing_list2 + %% ----------------------------------- + %% [a,b,c] author=a -> [c,a] author=c + %% [] [b] + %% + %% ... where RelativeToServer=b. In this case, b + %% has been partitions for a while and has only + %% now just learned of several epoch transitions. + %% If the author of both is also in the UPI of + %% both, then those authors would not have allowed + %% a bad transition, so we will assume this + %% transition is OK. + lists:member(AuthorServer1, UPI_list1) + andalso + lists:member(AuthorServer2, UPI_list2) + end + end; + true -> + true + end, + true = lists:prefix(UPI_common_prefix, UPI_list1), + true = lists:prefix(UPI_common_prefix, UPI_list2), + UPI_1_suffix = UPI_list1 -- UPI_common_prefix, + UPI_2_suffix = UPI_list2 -- UPI_common_prefix, + + MoreCheckingP = + RelativeToServer == undefined + orelse + not (lists:member(RelativeToServer, Down_list2) orelse + lists:member(RelativeToServer, Repairing_list2)), + + if not MoreCheckingP -> + ok; + MoreCheckingP -> + %% Where did elements in UPI_2_suffix come from? + %% Only two sources are permitted. + [lists:member(X, Repairing_list1) % X added after repair done + orelse + lists:member(X, UPI_list1) % X in UPI_list1 after common pref + || X <- UPI_2_suffix], + + %% The UPI_2_suffix must exactly be equal to: ordered items from + %% UPI_list1 concat'ed with ordered items from Repairing_list1. + %% Both temp vars below preserve relative order! + UPI_2_suffix_from_UPI1 = [X || X <- UPI_1_suffix, + lists:member(X, UPI_list2)], + UPI_2_suffix_from_Repairing1 = [X || X <- UPI_2_suffix, + lists:member(X, Repairing_list1)], + %% true? + UPI_2_concat = (UPI_2_suffix_from_UPI1 ++ UPI_2_suffix_from_Repairing1), + if UPI_2_suffix == UPI_2_concat -> + ok; + true -> + if RetrospectiveP -> + %% We are in retrospective mode. But there are + %% some transitions that are difficult to find + %% when standing outside of all of the FLUs and + %% examining their behavior. (In contrast to + %% this same function being called "in the path" + %% of a projection transition by a particular FLU + %% which knows exactly its prior projection and + %% exactly what it intends to do.) Perhaps this + %% exception clause here can go away with + %% better/more clever retrospection analysis? + %% + %% Here's a case that PULSE found: + %% FLU B: + %% E=257: UPI=[c,a], REPAIRING=[b] + %% E=284: UPI=[c,a], REPAIRING=[b] + %% FLU a: + %% E=251: UPI=[c], REPAIRING=[a,b] + %% E=284: UPI=[c,a], REPAIRING=[b] + %% FLU c: + %% E=282: UPI=[c], REPAIRING=[a,b] + %% E=284: UPI=[c,a], REPAIRING=[b] + %% + %% From the perspective of each individual FLU, + %% the unanimous transition at epoch #284 is + %% good. The repair that is done by FLU c -> a + %% is likewise good. + %% + %% From a retrospective point of view (and the + %% current implementation), there's a bad-looking + %% transition from epoch #269 to #284. This is + %% from the point of view of the last two + %% unanimous private projection store epochs: + %% + %% E=269: UPI=[c], REPAIRING=[], DOWN=[a,b] + %% E=284: UPI=[c,a], REPAIRING=[b] + %% + %% The retrospective view by + %% machi_chain_manager1_pulse.erl just can't + %% reason correctly about this situation. 
We + %% will instead rely on the non-introspective + %% sanity checking that each FLU does before it + %% writes to its private projection store and + %% then adopts that projection (and unwedges + %% itself, etc etc). + + %% io:format(user, "QQQ: RetrospectiveP ~p\n", [RetrospectiveP]), + %% io:format(user, "QQQ: UPI_2_suffix ~p\n", [UPI_2_suffix]), + %% io:format(user, "QQQ: UPI_2_suffix_from_UPI1 ~p\n", [UPI_2_suffix_from_UPI1]), + %% io:format(user, "QQQ: UPI_2_suffix_from_Repairing1 ~p\n", [UPI_2_suffix_from_Repairing1]), + io:format(user, "|~p,~p TODO revisit|", + [?MODULE, ?LINE]), + ok; + not RetrospectiveP -> + exit({upi_2_suffix_error}) + end + end + end, + true + catch + _Type:_Err -> + S1 = make_projection_summary(P1), + S2 = make_projection_summary(P2), + Trace = erlang:get_stacktrace(), + %% TODO: this history goop is useful sometimes for debugging but + %% not for any "real" use. Get rid of it, for the long term. + H = (catch [{FLUName, Type, P#projection_v1.epoch_number, make_projection_summary(P)} || + FLUName <- P1#projection_v1.all_members, + Type <- [public,private], + P <- machi_flu0:proj_get_all(FLUName, Type)]), + {err, _Type, _Err, from, S1, to, S2, relative_to, RelativeToServer, + history, (catch lists:sort(H)), + stack, Trace} + end. + +find_common_prefix([], _) -> + []; +find_common_prefix(_, []) -> + []; +find_common_prefix([H|L1], [H|L2]) -> + [H|find_common_prefix(L1, L2)]; +find_common_prefix(_, _) -> + []. + +sleep_ranked_order(MinSleep, MaxSleep, FLU, FLU_list) -> + Front = lists:takewhile(fun(X) -> X /=FLU end, FLU_list), + Index = length(Front) + 1, + NumNodes = length(FLU_list), + SleepIndex = NumNodes - Index, + SleepChunk = MaxSleep div NumNodes, + SleepTime = MinSleep + (SleepChunk * SleepIndex), + timer:sleep(SleepTime), + SleepTime. + +my_find_minmost([]) -> + 0; +my_find_minmost([{_,_}|_] = TransFlapCounts0) -> + lists:min([FlapCount || {_T, {_FlTime, FlapCount}} <- TransFlapCounts0]); +my_find_minmost(TransFlapCounts0) -> + lists:min(TransFlapCounts0). + +get_raw_flapping_i(#projection_v1{dbg=Dbg}) -> + proplists:get_value(flapping_i, Dbg, []). + +get_flap_count(P) -> + proplists:get_value(flap_count, get_raw_flapping_i(P), 0). + +get_all_flap_counts(P) -> + proplists:get_value(all_flap_counts, get_raw_flapping_i(P), []). + +get_all_flap_counts_counts(P) -> + case get_all_flap_counts(P) of + [] -> + []; + [{_,{_,_}}|_] = Cs -> + [Count || {_FLU, {_Time, Count}} <- Cs] + end. + +get_all_hosed(P) when is_record(P, projection_v1)-> + proplists:get_value(all_hosed, get_raw_flapping_i(P), []); +get_all_hosed(S) when is_record(S, ch_mgr) -> + proplists:get_value(all_hosed, + proplists:get_value(flapping_i, S#ch_mgr.runenv, []), + []). + +merge_flap_counts(FlapCounts) -> + merge_flap_counts(FlapCounts, orddict:new()). + +merge_flap_counts([], D) -> + orddict:to_list(D); +merge_flap_counts([FlapCount|Rest], D1) -> + %% We know that FlapCount is list({Actor, {{_epk,FlapStartTime},NumFlaps}}). + D2 = orddict:from_list(FlapCount), + D2 = orddict:from_list(FlapCount), + %% If the FlapStartTimes are identical, then pick the bigger flap count. + %% If the FlapStartTimes differ, then pick the larger start time tuple. 
+ D3 = orddict:merge(fun(_Key, {{_,T1}, NF1}= V1, {{_,T2}, NF2}=V2) + when T1 == T2 -> + if NF1 > NF2 -> + V1; + true -> + V2 + end; + (_Key, {{_,T1},_NF1}= V1, {{_,T2},_NF2}=V2) -> + if T1 > T2 -> + V1; + true -> + V2 + end; + (_Key, V1, V2) -> + exit({bad_merge_2tuples,mod,?MODULE,line,?LINE, + _Key, V1, V2}) + end, D1, D2), + merge_flap_counts(Rest, D3). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +perhaps_call_t(S, Partitions, FLU, DoIt) -> + try + perhaps_call(S, Partitions, FLU, DoIt) + catch + exit:timeout -> + t_timeout + end. + +perhaps_call(#ch_mgr{name=MyName, myflu=MyFLU}, Partitions, FLU, DoIt) -> + RemoteFLU_p = FLU /= MyFLU, + case RemoteFLU_p andalso lists:member({MyName, FLU}, Partitions) of + false -> + Res = DoIt(), + case RemoteFLU_p andalso lists:member({FLU, MyName}, Partitions) of + false -> + Res; + _ -> + (catch put(react, [{timeout2,me,MyFLU,to,FLU,RemoteFLU_p,Partitions}|get(react)])), + exit(timeout) + end; + _ -> + (catch put(react, [{timeout1,me,MyFLU,to,FLU,RemoteFLU_p,Partitions}|get(react)])), + exit(timeout) + end. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + diff --git a/test/machi_chain_manager1_pulse.erl b/test/machi_chain_manager1_pulse.erl new file mode 100644 index 0000000..b95cf00 --- /dev/null +++ b/test/machi_chain_manager1_pulse.erl @@ -0,0 +1,379 @@ +%% ------------------------------------------------------------------- +%% +%% Machi: a small village of replicated files +%% +%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(machi_chain_manager1_pulse). + +%% The while module is ifdef:ed, rebar should set PULSE +-ifdef(PULSE). + +-compile(export_all). + +-include_lib("eqc/include/eqc.hrl"). +-include_lib("eqc/include/eqc_statem.hrl"). + +-include("machi.hrl"). + +-include_lib("eunit/include/eunit.hrl"). + +-compile({parse_transform, pulse_instrument}). +-compile({pulse_replace_module, [{application, pulse_application}]}). +%% The following functions contains side_effects but are run outside +%% PULSE, i.e. PULSE needs to leave them alone +-compile({pulse_skip,[{prop_pulse_test_,0}]}). +-compile({pulse_no_side_effect,[{file,'_','_'}, {erlang, now, 0}]}). + +%% Used for output within EUnit... +-define(QC_FMT(Fmt, Args), + io:format(user, Fmt, Args)). + +%% And to force EUnit to output QuickCheck output... +-define(QC_OUT(P), + eqc:on_output(fun(Str, Args) -> ?QC_FMT(Str, Args) end, P)). + +-define(MGR, machi_chain_manager1). +-define(MGRTEST, machi_chain_manager1_test). + +-record(state, { + step=0, + num_pids, + pids, + dump_state + }). + +initial_state() -> + #state{}. + +gen_num_pids() -> + choose(2, 5). + +gen_seed() -> + noshrink({choose(1, 10000), choose(1, 10000), choose(1, 10000)}). + +gen_old_threshold() -> + noshrink(choose(1, 100)). + +gen_no_partition_threshold() -> + noshrink(choose(1, 100)). 
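For reference while reading the generators below: the managers reach the FLUs through perhaps_call_t/4 (defined above in machi_chain_manager1.erl), where a partition entry {From, To} drops traffic in that direction only. A stripped-down restatement of that check:

    %% A partition is directional: {Me, Remote} kills the request,
    %% {Remote, Me} kills the reply (after the side effect has happened).
    maybe_call(Me, Remote, Partitions, DoIt) ->
        case lists:member({Me, Remote}, Partitions) of
            true  -> t_timeout;
            false ->
                Res = DoIt(),
                case lists:member({Remote, Me}, Partitions) of
                    true  -> t_timeout;
                    false -> Res
                end
        end.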
+ +command(#state{step=0}) -> + {call, ?MODULE, setup, [gen_num_pids(), gen_seed()]}; +command(S) -> + frequency([ + { 1, {call, ?MODULE, change_partitions, + [gen_old_threshold(), gen_no_partition_threshold()]}}, + {50, {call, ?MODULE, do_ticks, + [choose(5, 100), S#state.pids, + gen_old_threshold(), gen_no_partition_threshold()]}} + ]). + +precondition(_S, _) -> + true. + +next_state(#state{step=Step}=S, Res, Call) -> + next_state2(S#state{step=Step + 1}, Res, Call). + +next_state2(S, Res, {call, _, setup, [NumPids, _Seed]}) -> + S#state{num_pids=NumPids, pids=Res}; +next_state2(S, Res, {call, _, dump_state, _Args}) -> + S#state{dump_state=Res}; +next_state2(S, _Res, {call, _, _Func, _Args}) -> + S. + +postcondition(_S, {call, _, _Func, _Args}, _Res) -> + true. + +all_list() -> + [a,b,c]. + %% [a,b,c,d,e]. + +setup(_Num, Seed) -> + ?QC_FMT("\nsetup,", []), + All_list = all_list(), + _ = machi_partition_simulator:start_link(Seed, 0, 100), + _Partitions = machi_partition_simulator:get(All_list), + + FLU_pids = [begin + {ok, FLUPid} = machi_flu0:start_link(Name), + _ = machi_flu0:get_epoch(FLUPid), + FLUPid + end || Name <- All_list], + Namez = lists:zip(All_list, FLU_pids), + Mgr_pids = [begin + {ok, Mgr} = ?MGR:start_link(Name, All_list, FLU_pid), + Mgr + end || {Name, FLU_pid} <- Namez], + timer:sleep(1), + {ok, P1} = ?MGR:test_calc_projection(hd(Mgr_pids), false), + P1Epoch = P1#projection.epoch_number, + [ok = machi_flu0:proj_write(FLU, P1Epoch, public, P1) || FLU <- FLU_pids], + [?MGR:test_react_to_env(Mgr) || Mgr <- Mgr_pids], + + Res = {FLU_pids, Mgr_pids}, + put(manager_pids_hack, Res), + Res. + +change_partitions(OldThreshold, NoPartitionThreshold) -> + machi_partition_simulator:reset_thresholds(OldThreshold, + NoPartitionThreshold). + +always_last_partitions() -> + machi_partition_simulator:always_last_partitions(). + +private_stable_check(FLUs) -> + {_FLU_pids, Mgr_pids} = get(manager_pids_hack), + Res = private_projections_are_stable_check(FLUs, Mgr_pids), + if not Res -> + io:format(user, "BUMMER: private stable check failed!\n", []); + true -> + ok + end, + Res. + +do_ticks(Num, PidsMaybe, OldThreshold, NoPartitionThreshold) -> + io:format(user, "~p,~p,~p|", [Num, OldThreshold, NoPartitionThreshold]), + {_FLU_pids, Mgr_pids} = case PidsMaybe of + undefined -> get(manager_pids_hack); + _ -> PidsMaybe + end, + if is_integer(OldThreshold) -> + machi_partition_simulator:reset_thresholds(OldThreshold, + NoPartitionThreshold); + true -> + ?QC_FMT("{e=~w},", [get_biggest_private_epoch_number()]), + machi_partition_simulator:no_partitions() + end, + Res = exec_ticks(Num, Mgr_pids), + if not is_integer(OldThreshold) -> + ?QC_FMT("{e=~w},", [get_biggest_private_epoch_number()]); + true -> + ok + end, + Res. + +get_biggest_private_epoch_number() -> + lists:last( + lists:usort( + lists:flatten( + [machi_flu0:proj_list_all(FLU, private) || + FLU <- all_list()]))). 
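+%% Note on do_ticks/4 above: passing a non-integer such as 'no' for
+%% OldThreshold switches the simulator to no_partitions() before the
+%% ticks run, which is how the stabilization phase of prop_pulse/0
+%% below quiets the network, e.g. the symbolic call:
+%%
+%%   {call, ?MODULE, do_ticks, [25, undefined, no, no]}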
+ +dump_state() -> + try + ?QC_FMT("dump_state(", []), + {FLU_pids, _Mgr_pids} = get(manager_pids_hack), + Namez = zip(all_list(), FLU_pids), + Report = ?MGRTEST:unanimous_report(Namez), + %% ?QC_FMT("Report ~p\n", [Report]), + + Diag1 = [begin + Ps = machi_flu0:proj_get_all(FLU, Type), + [io_lib:format("~p ~p ~p: ~w\n", [FLUName, Type, P#projection.epoch_number, ?MGR:make_projection_summary(P)]) || P <- Ps] + end || {FLUName, FLU} <- Namez, + Type <- [public] ], + + UniquePrivateEs = + lists:usort(lists:flatten( + [machi_flu0:proj_list_all(FLU, private) || + {_FLUName, FLU} <- Namez])), + P_lists0 = [{FLUName, Type, machi_flu0:proj_get_all(FLUPid, Type)} || + {FLUName, FLUPid} <- Namez, Type <- [public,private]], + P_lists = [{FLUName, Type, P} || {FLUName, Type, Ps} <- P_lists0, + P <- Ps], + AllDict = lists:foldl(fun({FLU, Type, P}, D) -> + K = {FLU, Type, P#projection.epoch_number}, + dict:store(K, P, D) + end, dict:new(), lists:flatten(P_lists)), + DumbFinderBackward = + fun(FLUName) -> + fun(E, error_unwritten) -> + case dict:find({FLUName, private, E}, AllDict) of + {ok, T} -> T; + error -> error_unwritten + end; + %% case machi_flu0:proj_read(FLU, E, private) of + %% {ok, T} -> T; + %% Else -> Else + %% end; + (_E, Acc) -> + Acc + end + end, + Diag2 = [[ + io_lib:format("~p private: ~w\n", + [FLUName, + ?MGR:make_projection_summary( + lists:foldl(DumbFinderBackward(FLUName), + error_unwritten, + lists:seq(Epoch, 0, -1)))]) + || {FLUName, _FLU} <- Namez] + || Epoch <- UniquePrivateEs], + + ?QC_FMT(")", []), + {Report, lists:flatten([Diag1, Diag2])} + catch XX:YY -> + ?QC_FMT("OUCH: ~p ~p @ ~p\n", [XX, YY, erlang:get_stacktrace()]) + end. + +prop_pulse() -> + ?FORALL({Cmds0, Seed}, {non_empty(commands(?MODULE)), pulse:seed()}, + ?IMPLIES(1 < length(Cmds0) andalso length(Cmds0) < 5, + begin + ok = shutdown_hard(), + %% PULSE can be really unfair, of course, including having exec_ticks + %% run where all of FLU a does its ticks then FLU b. Such a situation + %% doesn't always allow unanimous private projection store values: + %% FLU a might need one more tick to write its private projection, but + %% it isn't given a chance at the end of the PULSE run. So we cheat + Stabilize1 = [{set,{var,99999995}, + {call, ?MODULE, always_last_partitions, []}}], + Stabilize2 = [{set,{var,99999996}, + {call, ?MODULE, private_stable_check, [all_list()]}}], + LastTriggerTicks = {set,{var,99999997}, + {call, ?MODULE, do_ticks, [25, undefined, no, no]}}, + Cmds1 = lists:duplicate(2, LastTriggerTicks), + %% Cmds1 = lists:duplicate(length(all_list())*2, LastTriggerTicks), + Cmds = Cmds0 ++ + Stabilize1 ++ + Cmds1 ++ + Stabilize2 ++ + [{set,{var,99999999}, {call, ?MODULE, dump_state, []}}], + {_H2, S2, Res} = pulse:run( + fun() -> + {_H, _S, _R} = run_commands(?MODULE, Cmds) + end, [{seed, Seed}, + {strategy, unfair}]), + ok = shutdown_hard(), + + {Report, Diag} = S2#state.dump_state, + + %% Report is ordered by Epoch. For each private projection + %% written during any given epoch, confirm that all chain + %% members appear in only one unique chain, i.e., the sets of + %% unique chains are disjoint. + AllDisjointP = ?MGRTEST:all_reports_are_disjoint(Report), + + %% Given the report, we flip it around so that we observe the + %% sets of chain transitions relative to each FLU. 
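+          %% Illustration (hypothetical report entry): an epoch such as
+          %%   {5, {ok_disjoint, [{agreed_membership,{[a,b],[c]}}]}}
+          %% contributes {5,[a,b],[c]} to the chain list of a, b, and c,
+          %% so each FLU ends up with its own epoch-ordered list of
+          %%   {Epoch, UPI, Repairing}
+          %% transitions for the sanity check below.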
+ R_Chains = [?MGRTEST:extract_chains_relative_to_flu(FLU, Report) || + FLU <- all_list()], + R_Projs = [{FLU, [?MGRTEST:chain_to_projection( + FLU, Epoch, UPI, Repairing, all_list()) || + {Epoch, UPI, Repairing} <- E_Chains]} || + {FLU, E_Chains} <- R_Chains], + + %% For each chain transition experienced by a particular FLU, + %% confirm that each state transition is OK. + Sane = + [{FLU,_SaneRes} = {FLU,?MGR:projection_transitions_are_sane_retrospective( + Ps, FLU)} || + {FLU, Ps} <- R_Projs], + SaneP = lists:all(fun({_FLU, SaneRes}) -> SaneRes == true end, Sane), + + %% The final report item should say that all are agreed_membership. + {_LastEpoch, {ok_disjoint, LastRepXs}} = lists:last(Report), + AgreedOrNot = lists:usort([element(1, X) || X <- LastRepXs]), + + %% TODO: Check that we've converged to a single chain with no repairs. + SingleChainNoRepair = case LastRepXs of + [{agreed_membership,{_UPI,[]}}] -> + true; + _ -> + LastRepXs + end, + + ?WHENFAIL( + begin + ?QC_FMT("Res = ~p\n", [Res]), + ?QC_FMT("Diag = ~s\n", [Diag]), + ?QC_FMT("Report = ~p\n", [Report]), + ?QC_FMT("Sane = ~p\n", [Sane]), + ?QC_FMT("SingleChainNoRepair failure =\n ~p\n", [SingleChainNoRepair]) + end, + conjunction([{res, Res == true orelse Res == ok}, + {all_disjoint, AllDisjointP}, + {sane, SaneP}, + {all_agreed_at_end, AgreedOrNot == [agreed_membership]}, + {single_chain_no_repair, SingleChainNoRepair} + ])) + end)). + +prop_pulse_test_() -> + Timeout = case os:getenv("PULSE_TIME") of + false -> 60; + Val -> list_to_integer(Val) + end, + ExtraTO = case os:getenv("PULSE_SHRINK_TIME") of + false -> 0; + Val2 -> list_to_integer(Val2) + end, + {timeout, (Timeout+ExtraTO+300), % 300 = a bit more fudge time + fun() -> + ?assert(eqc:quickcheck(eqc:testing_time(Timeout, + ?QC_OUT(prop_pulse())))) + end}. + +shutdown_hard() -> + (catch machi_partition_simulator:stop()), + [(catch machi_flu0:stop(X)) || X <- all_list()], + timer:sleep(1), + (catch exit(whereis(machi_partition_simulator), kill)), + [(catch exit(whereis(X), kill)) || X <- all_list()], + erlang:yield(), + ok. + +exec_ticks(Num, Mgr_pids) -> + Parent = self(), + Pids = [spawn_link(fun() -> + [begin + erlang:yield(), + Max = 10, + Elapsed = + ?MGR:sleep_ranked_order(1, Max, M_name, all_list()), + Res = ?MGR:test_react_to_env(MMM), + timer:sleep(erlang:max(0, Max - Elapsed)), + Res=Res %% ?D({self(), Res}) + end || _ <- lists:seq(1,Num)], + Parent ! done + end) || {M_name, MMM} <- lists:zip(all_list(), Mgr_pids) ], + [receive + done -> + ok + after 5000 -> + exit(icky_timeout) + end || _ <- Pids], + ok. + +private_projections_are_stable_check(All_list, Mgr_pids) -> + %% TODO: extend the check to look not only for latest num, but + %% also check for flapping, and if yes, to see if all_hosed are + %% all exactly equal. + + _ = exec_ticks(40, Mgr_pids), + Private1 = [machi_flu0:proj_get_latest_num(FLU, private) || + FLU <- All_list], + _ = exec_ticks(5, Mgr_pids), + Private2 = [machi_flu0:proj_get_latest_num(FLU, private) || + FLU <- All_list], + + (Private1 == Private2). + + +-endif. % PULSE diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl new file mode 100644 index 0000000..def16c7 --- /dev/null +++ b/test/machi_chain_manager1_test.erl @@ -0,0 +1,589 @@ +%% ------------------------------------------------------------------- +%% +%% Machi: a small village of replicated files +%% +%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. 
+%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(machi_chain_manager1_test). + +-include("machi.hrl"). +-include("machi_projection.hrl"). + +-define(MGR, machi_chain_manager1). + +-define(D(X), io:format(user, "~s ~p\n", [??X, X])). +-define(Dw(X), io:format(user, "~s ~w\n", [??X, X])). +-define(FLU_C, machi_flu1_client). + +-export([]). + +-ifdef(TEST). + +-ifdef(EQC). +-include_lib("eqc/include/eqc.hrl"). +%% -include_lib("eqc/include/eqc_statem.hrl"). +-define(QC_OUT(P), + eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)). +-endif. + +-include_lib("eunit/include/eunit.hrl"). +-compile(export_all). + +unanimous_report(Namez) -> + UniquePrivateEs = + lists:usort(lists:flatten( + [machi_flu0:proj_list_all(FLU, private) || + {_FLUName, FLU} <- Namez])), + [unanimous_report(Epoch, Namez) || Epoch <- UniquePrivateEs]. + +unanimous_report(Epoch, Namez) -> + Projs = [{FLUName, case machi_flu0:proj_read(FLU, Epoch, private) of + {ok, T} -> T; + _Else -> not_in_this_epoch + end} || {FLUName, FLU} <- Namez], + UPI_R_Sums = [{Proj#projection_v1.upi, Proj#projection_v1.repairing, + Proj#projection_v1.epoch_csum} || + {_FLUname, Proj} <- Projs, + is_record(Proj, projection_v1)], + UniqueUPIs = lists:usort([UPI || {UPI, _Repairing, _CSum} <- UPI_R_Sums]), + Res = + [begin + case lists:usort([CSum || {U, _Repairing, CSum} <- UPI_R_Sums, + U == UPI]) of + [_1CSum] -> + %% Yay, there's only 1 checksum. Let's check + %% that all FLUs are in agreement. + {UPI, Repairing, _CSum} = + lists:keyfind(UPI, 1, UPI_R_Sums), + %% TODO: make certain that this subtlety doesn't get + %% last in later implementations. + + %% So, this is a bit of a tricky thing. If we're at + %% upi=[c] and repairing=[a,b], then the transition + %% (eventually!) to upi=[c,a] does not currently depend + %% on b being an active participant in the repair. + %% + %% Yes, b's state is very important for making certain + %% that all repair operations succeed both to a & b. + %% However, in this simulation, we only consider that + %% the head(Repairing) is sane. Therefore, we use only + %% the "HeadOfRepairing" in our considerations here. 
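+                 %% Illustration (hypothetical values): with upi=[c] and
+                 %% repairing=[a,b], only c and a (the head of Repairing)
+                 %% must agree on the epoch checksum below; b's copy is not
+                 %% consulted for the agreed_membership verdict.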
+ HeadOfRepairing = case Repairing of + [H_Rep|_] -> + [H_Rep]; + _ -> + [] + end, + Tmp = [{FLU, case proplists:get_value(FLU, Projs) of + P when is_record(P, projection_v1) -> + P#projection_v1.epoch_csum; + Else -> + Else + end} || FLU <- UPI ++ HeadOfRepairing], + case lists:usort([CSum || {_FLU, CSum} <- Tmp]) of + [_] -> + {agreed_membership, {UPI, Repairing}}; + Else2 -> + {not_agreed, {UPI, Repairing}, Else2} + end; + _Else -> + {UPI, not_unique, Epoch, _Else} + end + end || UPI <- UniqueUPIs], + AgreedResUPI_Rs = [UPI++Repairing || + {agreed_membership, {UPI, Repairing}} <- Res], + Tag = case lists:usort(lists:flatten(AgreedResUPI_Rs)) == + lists:sort(lists:flatten(AgreedResUPI_Rs)) of + true -> + ok_disjoint; + false -> + bummer_NOT_DISJOINT + end, + {Epoch, {Tag, Res}}. + +all_reports_are_disjoint(Report) -> + [] == [X || {_Epoch, Tuple}=X <- Report, + element(1, Tuple) /= ok_disjoint]. + +extract_chains_relative_to_flu(FLU, Report) -> + {FLU, [{Epoch, UPI, Repairing} || + {Epoch, {ok_disjoint, Es}} <- Report, + {agreed_membership, {UPI, Repairing}} <- Es, + lists:member(FLU, UPI) orelse lists:member(FLU, Repairing)]}. + +chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> + ?MGR:make_projection(Epoch, MyName, All_list, + All_list -- (UPI_list ++ Repairing_list), + UPI_list, Repairing_list, []). + +-ifndef(PULSE). + +smoke0_test() -> + {ok, _} = machi_partition_simulator:start_link({1,2,3}, 50, 50), + Host = "localhost", + TcpPort = 6623, + {ok, FLUa} = machi_flu1:start_link([{a,TcpPort,"./data.a"}]), + {ok, M0} = ?MGR:start_link(a, [a,b,c], a), + SockA = machi_util:connect(Host, TcpPort), + try + pong = ?MGR:ping(M0) + after + ok = ?MGR:stop(M0), + ok = machi_flu0:stop(FLUa), + ok = machi_partition_simulator:stop() + end. + +smoke1_testTODO() -> + machi_partition_simulator:start_link({1,2,3}, 100, 0), + {ok, FLUa} = machi_flu0:start_link(a), + {ok, FLUb} = machi_flu0:start_link(b), + {ok, FLUc} = machi_flu0:start_link(c), + I_represent = I_am = a, + {ok, M0} = ?MGR:start_link(I_represent, [a,b,c], I_am), + try + {ok, _P1} = ?MGR:test_calc_projection(M0, false), + + _ = ?MGR:test_calc_proposed_projection(M0), + {local_write_result, ok, + {remote_write_results, [{b,ok},{c,ok}]}} = + ?MGR:test_write_proposed_projection(M0), + {unanimous, P1, Extra1} = ?MGR:test_read_latest_public_projection(M0, false), + + ok + after + ok = ?MGR:stop(M0), + ok = machi_flu0:stop(FLUa), + ok = machi_flu0:stop(FLUb), + ok = machi_flu0:stop(FLUc), + ok = machi_partition_simulator:stop() + end. 
+ +nonunanimous_setup_and_fix_testTODO() -> + machi_partition_simulator:start_link({1,2,3}, 100, 0), + {ok, FLUa} = machi_flu0:start_link(a), + {ok, FLUb} = machi_flu0:start_link(b), + I_represent = I_am = a, + {ok, Ma} = ?MGR:start_link(I_represent, [a,b], I_am), + {ok, Mb} = ?MGR:start_link(b, [a,b], b), + try + {ok, P1} = ?MGR:test_calc_projection(Ma, false), + + P1a = ?MGR:update_projection_checksum( + P1#projection_v1{down=[b], upi=[a], dbg=[{hackhack, ?LINE}]}), + P1b = ?MGR:update_projection_checksum( + P1#projection_v1{author_server=b, creation_time=now(), + down=[a], upi=[b], dbg=[{hackhack, ?LINE}]}), + P1Epoch = P1#projection_v1.epoch_number, + ok = machi_flu0:proj_write(FLUa, P1Epoch, public, P1a), + ok = machi_flu0:proj_write(FLUb, P1Epoch, public, P1b), + + ?D(x), + {not_unanimous,_,_}=_XX = ?MGR:test_read_latest_public_projection(Ma, false), + ?Dw(_XX), + {not_unanimous,_,_}=_YY = ?MGR:test_read_latest_public_projection(Ma, true), + %% The read repair here doesn't automatically trigger the creation of + %% a new projection (to try to create a unanimous projection). So + %% we expect nothing to change when called again. + {not_unanimous,_,_}=_YY = ?MGR:test_read_latest_public_projection(Ma, true), + + {now_using, _} = ?MGR:test_react_to_env(Ma), + {unanimous,P2,E2} = ?MGR:test_read_latest_public_projection(Ma, false), + {ok, P2pa} = machi_flu0:proj_read_latest(FLUa, private), + P2 = P2pa#projection_v1{dbg2=[]}, + + %% FLUb should still be using proj #0 for its private use + {ok, P0pb} = machi_flu0:proj_read_latest(FLUb, private), + 0 = P0pb#projection_v1.epoch_number, + + %% Poke FLUb to react ... should be using the same private proj + %% as FLUa. + {now_using, _} = ?MGR:test_react_to_env(Mb), + {ok, P2pb} = machi_flu0:proj_read_latest(FLUb, private), + P2 = P2pb#projection_v1{dbg2=[]}, + + ok + after + ok = ?MGR:stop(Ma), + ok = ?MGR:stop(Mb), + ok = machi_flu0:stop(FLUa), + ok = machi_flu0:stop(FLUb), + ok = machi_partition_simulator:stop() + end. + +short_doc() -> +" +A visualization of the convergence behavior of the chain self-management +algorithm for Machi. + 1. Set up 4 FLUs and chain manager pairs. + 2. Create a number of different network partition scenarios, where + (simulated) partitions may be symmetric or asymmetric. Then halt changing + the partitions and keep the simulated network stable and broken. + 3. Run a number of iterations of the algorithm in parallel by poking each + of the manager processes on a random'ish basis. + 4. Afterward, fetch the chain transition changes made by each FLU and + verify that no transition was unsafe. + +During the iteration periods, the following is a cheatsheet for the output. +See the internal source for interpreting the rest of the output. + + 'Let loose the dogs of war!' Network instability + 'SET partitions = ' Network stability (but broken) + 'x uses:' The FLU x has made an internal state transition. The rest of + the line is a dump of internal state. + '{t}' This is a tick event which triggers one of the manager processes + to evaluate its environment and perhaps make a state transition. + +A long chain of '{t}{t}{t}{t}' means that the chain state has settled +to a stable configuration, which is the goal of the algorithm. +Press control-c to interrupt....". + +long_doc() -> + " +'Let loose the dogs of war!' + + The simulated network is very unstable for a few seconds. + +'x uses' + + After a single iteration, server x has determined that the chain + should be defined by the upi, repair, and down list in this record. 
+ If all participants reach the same conclusion at the same epoch + number (and checksum, see next item below), then the chain is + stable, fully configured, and can provide full service. + +'epoch,E' + + The epoch number for this decision is E. The checksum of the full + record is not shown. For purposes of the protocol, a server will + 'wedge' itself and refuse service (until a new config is chosen) + whenever: a). it sees a bigger epoch number mentioned somewhere, or + b). it sees the same epoch number but a different checksum. In case + of b), there was a network partition that has healed, and both sides + had chosen to operate with an identical epoch number but different + chain configs. + +'upi', 'repair', and 'down' + + Members in the chain that are fully in sync and thus preserving the + Update Propagation Invariant, up but under repair (simulated), and + down, respectively. + +'ps,[some list]' + + The list of asymmetric network partitions. {a,b} means that a + cannot send to b, but b can send to a. + + This partition list is recorded for debugging purposes but is *not* + used by the algorithm. The algorithm only 'feels' its effects via + simulated timeout whenever there's a partition in one of the + messaging directions. + +'nodes_up,[list]' + + The best guess right now of which ndoes are up, relative to the + author node, specified by '{author,X}' + +'SET partitions = [some list]' + + All subsequent iterations should have a stable list of partitions, + i.e. the 'ps' list described should be stable. + +'{FLAP: x flaps n}!' + + Server x has detected that it's flapping/oscillating after iteration + n of a naive/1st draft detection algorithm. +". + +convergence_demo_testTODO_() -> + {timeout, 98*300, fun() -> convergence_demo_testfun() end}. + +convergence_demo_testfun() -> + convergence_demo_testfun(3). + +convergence_demo_testfun(NumFLUs) -> + timer:sleep(100), + io:format(user, short_doc(), []), + %% Faster test startup, commented: timer:sleep(3000), + + FLU_biglist = [a,b,c,d,e,f,g], + All_list = lists:sublist(FLU_biglist, NumFLUs), + io:format(user, "\nSET # of FLus = ~w members ~w).\n", + [NumFLUs, All_list]), + machi_partition_simulator:start_link({111,222,33}, 0, 100), + _ = machi_partition_simulator:get(All_list), + + Namez = + [begin + {ok, Pid} = machi_flu0:start_link(Name), + {Name, Pid} + end || Name <- All_list ], + + MgrOpts = [private_write_verbose], + MgrNamez = + [begin + {ok, MPid} = ?MGR:start_link(Name, All_list, FLUPid, MgrOpts), + {Name, MPid} + end || {Name, FLUPid} <- Namez], + try + [{_, Ma}|_] = MgrNamez, + {ok, P1} = ?MGR:test_calc_projection(Ma, false), + P1Epoch = P1#projection_v1.epoch_number, + [ok = machi_flu0:proj_write(FLUPid, P1Epoch, public, P1) || + {_, FLUPid} <- Namez, FLUPid /= Ma], + + machi_partition_simulator:reset_thresholds(10, 50), + _ = machi_partition_simulator:get(All_list), + + Parent = self(), + DoIt = fun(Iters, S_min, S_max) -> + io:format(user, "\nDoIt: top\n\n", []), + Pids = [spawn(fun() -> + random:seed(now()), + [begin + erlang:yield(), + S_max_rand = random:uniform( + S_max + 1), + io:format(user, "{t}", []), + Elapsed = + ?MGR:sleep_ranked_order( + S_min, S_max_rand, + M_name, All_list), + _ = ?MGR:test_react_to_env(MMM), + %% if M_name == d -> + %% [_ = ?MGR:test_react_to_env(MMM) || + %% _ <- lists:seq(1,3)], + %% superunfair; + %% true -> + %% ok + %% end, + %% Be more unfair by not + %% sleeping here. + %% timer:sleep(S_max - Elapsed), + Elapsed + end || _ <- lists:seq(1, Iters)], + Parent ! 
done + end) || {M_name, MMM} <- MgrNamez ], + [receive + done -> + ok + after 995000 -> + exit(icky_timeout) + end || _ <- Pids] + end, + + _XandYs1 = [[{X,Y}] || X <- All_list, Y <- All_list, X /= Y], + _XandYs2 = [[{X,Y}, {A,B}] || X <- All_list, Y <- All_list, X /= Y, + A <- All_list, B <- All_list, A /= B, + X /= A], + _XandYs3 = [[{X,Y}, {A,B}, {C,D}] || X <- All_list, Y <- All_list, X /= Y, + A <- All_list, B <- All_list, A /= B, + C <- All_list, D <- All_list, C /= D, + X /= A, X /= C, A /= C], + %% AllPartitionCombinations = _XandYs1 ++ _XandYs2, + %% AllPartitionCombinations = _XandYs3, + AllPartitionCombinations = _XandYs1 ++ _XandYs2 ++ _XandYs3, + ?D({?LINE, length(AllPartitionCombinations)}), + + machi_partition_simulator:reset_thresholds(10, 50), + io:format(user, "\nLet loose the dogs of war!\n", []), + DoIt(30, 0, 0), + [begin + io:format(user, "\nSET partitions = ~w.\n", [ [] ]),machi_partition_simulator:no_partitions(), + [DoIt(50, 10, 100) || _ <- [1,2,3]], + + %% machi_partition_simulator:reset_thresholds(10, 50), + %% io:format(user, "\nLet loose the dogs of war!\n", []), + %% DoIt(30, 0, 0), + + machi_partition_simulator:always_these_partitions(Partition), + io:format(user, "\nSET partitions = ~w.\n", [Partition]), + [DoIt(50, 10, 100) || _ <- [1,2,3,4] ], + PPP = + [begin + PPPallPubs = machi_flu0:proj_list_all(FLU, public), + [begin + {ok, Pr} = machi_flu0:proj_read(FLU, PPPepoch, public), + {Pr#projection_v1.epoch_number, FLUName, Pr} + end || PPPepoch <- PPPallPubs] + end || {FLUName, FLU} <- Namez], + io:format(user, "PPP ~p\n", [lists:sort(lists:append(PPP))]), + + %%%%%%%% {stable,true} = {stable,private_projections_are_stable(Namez, DoIt)}, + {hosed_ok,true} = {hosed_ok,all_hosed_lists_are_identical(Namez, Partition)}, + io:format(user, "\nSweet, all_hosed are identical-or-islands-inconclusive.\n", []), + timer:sleep(1000), + ok + end || Partition <- AllPartitionCombinations + %% end || Partition <- [ [{a,b},{b,d},{c,b}], + %% [{a,b},{b,d},{c,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], + %% %% [{a,b},{b,d},{c,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], + %% [{a,b},{b,d},{c,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], + %% [{a,b},{b,d},{c,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] + %% end || Partition <- [ [{a,b}, {b,c}], + %% [{a,b}, {c,b}] ] + %% end || Partition <- [ [{a,b}, {b,c}] ] %% hosed-not-equal @ 3 FLUs + %% end || Partition <- [ [{a,b}], + %% [{b,a}] ] + %% end || Partition <- [ [{a,b}, {c,b}], + %% [{a,b}, {b,c}] ] + %% end || Partition <- [ [{a,b}, {b,c}, {c,d}], + %% [{a,b}, {b,c},{b,d}, {c,d}], + %% [{b,a}, {b,c}, {c,d}], + %% [{a,b}, {c,b}, {c,d}], + %% [{a,b}, {b,c}, {d,c}] ] + %% end || Partition <- [ [{a,b}, {b,c}, {c,d}, {d,e}], + %% [{b,a}, {b,c}, {c,d}, {d,e}], + %% [{a,b}, {c,b}, {c,d}, {d,e}], + %% [{a,b}, {b,c}, {d,c}, {d,e}], + %% [{a,b}, {b,c}, {c,d}, {e,d}] ] + %% end || Partition <- [ [{c,a}] ] + %% end || Partition <- [ [{c,a}], [{c,b}, {a, b}] ] + %% end || Partition <- [ [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}], + %% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {b,c}], + %% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {c,d}] ] + %% end || Partition <- [ [{a,b}], + %% [{a,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], + %% [{a,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], + %% [{a,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], + %% [{a,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] + ], + %% exit(end_experiment), + + io:format(user, "\nSET partitions = []\n", []), + io:format(user, "We should see convergence to 1 correct chain.\n", []), + 
machi_partition_simulator:no_partitions(), + [DoIt(50, 10, 100) || _ <- [1]], + io:format(user, "Sweet, finishing early\n", []), exit(yoyoyo_testing_hack), + %% WARNING: In asymmetric partitions, private_projections_are_stable() + %% will never be true; code beyond this point on the -exp3 + %% branch is bit-rotted, sorry! + true = private_projections_are_stable(Namez, DoIt), + io:format(user, "~s\n", [os:cmd("date")]), + + %% We are stable now ... analyze it. + + %% Create a report where at least one FLU has written a + %% private projection. + Report = unanimous_report(Namez), + %% ?D(Report), + + %% Report is ordered by Epoch. For each private projection + %% written during any given epoch, confirm that all chain + %% members appear in only one unique chain, i.e., the sets of + %% unique chains are disjoint. + true = all_reports_are_disjoint(Report), + + %% Given the report, we flip it around so that we observe the + %% sets of chain transitions relative to each FLU. + R_Chains = [extract_chains_relative_to_flu(FLU, Report) || + FLU <- All_list], + %% ?D(R_Chains), + R_Projs = [{FLU, [chain_to_projection(FLU, Epoch, UPI, Repairing, + All_list) || + {Epoch, UPI, Repairing} <- E_Chains]} || + {FLU, E_Chains} <- R_Chains], + + %% For each chain transition experienced by a particular FLU, + %% confirm that each state transition is OK. + try + [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane(Ps, FLU)} || + {FLU, Ps} <- R_Projs], + io:format(user, "\nAll sanity checks pass, hooray!\n", []) + catch _Err:_What -> + io:format(user, "Report ~p\n", [Report]), + exit({line, ?LINE, _Err, _What}) + end, + %% ?D(R_Projs), + + ok + after + [ok = ?MGR:stop(MgrPid) || {_, MgrPid} <- MgrNamez], + [ok = machi_flu0:stop(FLUPid) || {_, FLUPid} <- Namez], + ok = machi_partition_simulator:stop() + end. + +private_projections_are_stable(Namez, PollFunc) -> + Private1 = [machi_flu0:proj_get_latest_num(FLU, private) || + {_Name, FLU} <- Namez], + PollFunc(5, 1, 10), + Private2 = [machi_flu0:proj_get_latest_num(FLU, private) || + {_Name, FLU} <- Namez], + true = (Private1 == Private2). + +all_hosed_lists_are_identical(Namez, Partition0) -> + Partition = lists:usort(Partition0), + Ps = [machi_flu0:proj_read_latest(FLU, private) || {_Name, FLU} <- Namez], + UniqueAllHoseds = lists:usort([machi_chain_manager1:get_all_hosed(P) || + {ok, P} <- Ps]), + Members = [M || {M, _Pid} <- Namez], + Islands = machi_partition_simulator:partitions2num_islands( + Members, Partition), + %% io:format(user, "all_hosed_lists_are_identical:\n", []), + %% io:format(user, " Uniques = ~p Islands ~p\n Partition ~p\n", + %% [Uniques, Islands, Partition]), + case length(UniqueAllHoseds) of + 1 -> + true; + %% TODO: With the addition of the digraph stuff below, the clause + %% below probably isn't necessary anymore, since the + %% digraph calculation should catch complete partition islands? + _ when Islands == 'many' -> + %% There are at least two partitions, so yes, it's quite + %% possible that the all_hosed lists may differ. + %% TODO Fix this up to be smarter about fully-isolated + %% islands of partition. + true; + _ -> + DG = digraph:new(), + Connection = machi_partition_simulator:partition2connection( + Members, Partition), + [digraph:add_vertex(DG, X) || X <- Members], + [digraph:add_edge(DG, X, Y) || {X,Y} <- Connection], + Any = + lists:any( + fun(X) -> + NotX = Members -- [X], + lists:any( + fun(Y) -> + %% There must be a shortest path of length + %% two in both directions, otherwise + %% the read projection call will fail. 
+ %% And it's that failure that we're + %% interested in here. + XtoY = digraph:get_short_path(DG, X, Y), + YtoX = digraph:get_short_path(DG, Y, X), + (XtoY == false orelse + length(XtoY) > 2) + orelse + (YtoX == false orelse + length(YtoX) > 2) + end, NotX) + end, Members), + digraph:delete(DG), + if Any == true -> + %% There's a missing path of length 2 between some + %% two FLUs, so yes, there's going to be + %% non-identical all_hosed lists. + true; + true -> + false % There's no excuse, buddy + end + end. + +-endif. % not PULSE +-endif. % TEST diff --git a/test/machi_partition_simulator.erl b/test/machi_partition_simulator.erl new file mode 100644 index 0000000..7ef70a3 --- /dev/null +++ b/test/machi_partition_simulator.erl @@ -0,0 +1,239 @@ +%% ------------------------------------------------------------------- +%% +%% Machi: a small village of replicated files +%% +%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(machi_partition_simulator). + +-behaviour(gen_server). + +-ifdef(TEST). + +-ifdef(EQC). +-include_lib("eqc/include/eqc.hrl"). +-endif. +-ifdef(PULSE). +-compile({parse_transform, pulse_instrument}). +-endif. + +-export([start_link/3, stop/0, + get/1, reset_thresholds/2, + no_partitions/0, always_last_partitions/0, always_these_partitions/1]). +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-export([islands2partitions/1, + partition2connection/2, + connection2partition/2, + partitions2num_islands/2, + partition_list_is_symmetric_p/2]). + +-define(TAB, ?MODULE). + +-record(state, { + seed, + old_partitions, + old_threshold, + no_partition_threshold, + method=oneway_partitions :: 'island' | 'oneway_partitions' + }). + +start_link(Seed, OldThreshold, NoPartitionThreshold) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, + {Seed, OldThreshold, NoPartitionThreshold}, []). + +stop() -> + gen_server:call(?MODULE, {stop}, infinity). + +get(Nodes) -> + gen_server:call(?MODULE, {get, Nodes}, infinity). + +reset_thresholds(OldThreshold, NoPartitionThreshold) -> + gen_server:call(?MODULE, {reset_thresholds, OldThreshold, NoPartitionThreshold}, infinity). + +no_partitions() -> + reset_thresholds(-999, 999). + +always_last_partitions() -> + reset_thresholds(999, 0). + +always_these_partitions(Parts) -> + reset_thresholds(999, 0), + gen_server:call(?MODULE, {always_these_partitions, Parts}, infinity). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +init({Seed, OldThreshold, NoPartitionThreshold}) -> + {ok, #state{seed=Seed, + old_partitions={[],[[]]}, + old_threshold=OldThreshold, + no_partition_threshold=NoPartitionThreshold}}. 
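+%% Typical usage, as exercised by the tests in this patch (sketch; the
+%% seed and member list shown are hypothetical):
+%%
+%%   {ok, _} = machi_partition_simulator:start_link({1,2,3}, 50, 50),
+%%   _Partitions = machi_partition_simulator:get([a,b,c]),
+%%   ok = machi_partition_simulator:reset_thresholds(10, 50),
+%%   ok = machi_partition_simulator:no_partitions(),
+%%   ok = machi_partition_simulator:stop().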
+ +handle_call({get, Nodes}, _From, S) -> + {Seed2, Partitions} = + calc_network_partitions(S#state.method, + Nodes, + S#state.seed, + S#state.old_partitions, + S#state.old_threshold, + S#state.no_partition_threshold), + {reply, Partitions, S#state{seed=Seed2, + old_partitions=Partitions}}; +handle_call({reset_thresholds, OldThreshold, NoPartitionThreshold}, _From, S) -> + {reply, ok, S#state{old_threshold=OldThreshold, + no_partition_threshold=NoPartitionThreshold}}; +handle_call({always_these_partitions, Parts}, _From, S) -> + {reply, ok, S#state{old_partitions={Parts,[na_reset_by_always]}}}; +handle_call({stop}, _From, S) -> + {stop, normal, ok, S}. + +handle_cast(_Cast, S) -> + {noreply, S}. + +handle_info(_Info, S) -> + {noreply, S}. + +terminate(_Reason, _S) -> + ok. + +code_change(_OldVsn, S, _Extra) -> + {ok, S}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +calc_network_partitions(Method, Nodes, Seed1, OldPartition, + OldThreshold, NoPartitionThreshold) -> + {Cutoff2, Seed2} = random:uniform_s(100, Seed1), + if Cutoff2 < OldThreshold -> + {Seed2, OldPartition}; + true -> + {Cutoff3, Seed3} = random:uniform_s(100, Seed1), + if Cutoff3 < NoPartitionThreshold -> + {Seed3, {[], [Nodes]}}; + true -> + make_network_partition_locations(Method, Nodes, Seed3) + end + end. + +make_network_partition_locations(island=_Method, Nodes, Seed1) -> + Num = length(Nodes), + {Seed2, WeightsNodes} = lists:foldl( + fun(Node, {Seeda, Acc}) -> + {Cutoff0, Seedb} = + random:uniform_s(100, Seeda), + Cutoff = erlang:max( + 2, if Cutoff0 rem 4 == 0 -> + 0; + true -> + Cutoff0 + end), + {Seedb, [{Cutoff, Node}|Acc]} + end, {Seed1, []}, Nodes), + IslandSep = 100 div Num, + Islands = [ + lists:sort([Nd || {Weight, Nd} <- WeightsNodes, + (Max - IslandSep) =< Weight, Weight < Max]) + || Max <- lists:seq(IslandSep + 1, 105, IslandSep)], + {Seed2, {lists:usort(islands2partitions(Islands)), lists:sort(Islands)}}; +make_network_partition_locations(oneway_partitions=_Method, Nodes, Seed1) -> + Pairs = make_all_pairs(Nodes), + Num = length(Pairs), + {Seed2, Weights} = lists:foldl( + fun(_, {Seeda, Acc}) -> + {Cutoff, Seedb} = random:uniform_s(100, Seeda), + {Seedb, [Cutoff|Acc]} + end, {Seed1, []}, lists:seq(1, Num)), + {Cutoff3, Seed3} = random:uniform_s(100, Seed2), + {Seed3, {[X || {Weight, X} <- lists:zip(Weights, Pairs), + Weight < Cutoff3], [islands_not_supported]}}. + +make_all_pairs(L) -> + lists:flatten(make_all_pairs2(lists:usort(L))). + +make_all_pairs2([]) -> + []; +make_all_pairs2([_]) -> + []; +make_all_pairs2([H1|T]) -> + [[{H1, X}, {X, H1}] || X <- T] ++ make_all_pairs(T). + +islands2partitions([]) -> + []; +islands2partitions([Island|Rest]) -> + [{X,Y} || X <- Island, + Y <- lists:append(Rest), X /= Y] + ++ + [{Y,X} || X <- Island, + Y <- lists:append(Rest), X /= Y] + ++ + islands2partitions(Rest). + +partition2connection(Members0, Partition0) -> + p2c_invert(lists:usort(Members0), lists:usort(Partition0)). + +connection2partition(Members0, Partition0) -> + p2c_invert(lists:usort(Members0), lists:usort(Partition0)). + +p2c_invert(Members, Partition_list_Or_Connection_list) -> + All = [{X,Y} || X <- Members, Y <- Members, X /= Y], + All -- Partition_list_Or_Connection_list. + +partitions2num_islands(Members0, Partition0) -> + %% Ignore duplicates in either arg, if any. 
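+    %% Illustration (hypothetical arguments):
+    %%   partitions2num_islands([a,b,c], [])                        -> 1
+    %%   partitions2num_islands([a,b,c], [{a,b}])                   -> 1
+    %%   partitions2num_islands([a,b,c], [{a,b},{b,a},{a,c},{c,a}]) -> many
+    %% A single one-way break still counts as one island because the
+    %% check below treats either {X,Y} or {Y,X} connectivity as connected.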
+ Members = lists:usort(Members0), + Partition = lists:usort(Partition0), + + Connections = partition2connection(Members, Partition), + Cs = [lists:member({X,Y}, Connections) + orelse + lists:member({Y,X}, Connections) || X <- Members, Y <- Members, + X /= Y], + case lists:usort(Cs) of + [true] -> 1; + [false, true] -> many % TODO too lazy to finish + end. + +partition_list_is_symmetric_p(Members0, Partition0) -> + %% %% Ignore duplicates in either arg, if any. + Members = lists:usort(Members0), + NumMembers = length(Members), + Partition = lists:usort(Partition0), + + NewDict = lists:foldl( + fun({A,B}, Dict) -> + Key = if A > B -> {A,B}; + true -> {B,A} + end, + orddict:update_counter(Key, 1, Dict) + end, orddict:new(), Partition), + AllOddP = orddict:fold( + fun(_Key, Count, true) when Count rem 2 == 0 -> + true; + (_, _, _) -> + false + end, true, NewDict), + if not AllOddP -> + false; + true -> + TwosCount = [Key || {Key, Count} <- orddict:to_list(NewDict), + Count == 2], + length(TwosCount) >= (NumMembers - 1) + end. + +-endif. % TEST From a79f385fa718a1cf553bf591610e390176455edf Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Mon, 6 Apr 2015 15:49:47 +0900 Subject: [PATCH 02/22] Fix type problem for return of get_latest_epoch --- src/machi_flu1_client.erl | 4 +-- src/machi_projection_store.erl | 49 ++++++++++++++++++++-------------- test/machi_flu1_test.erl | 3 ++- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/src/machi_flu1_client.erl b/src/machi_flu1_client.erl index 6dd6c65..8850a0c 100644 --- a/src/machi_flu1_client.erl +++ b/src/machi_flu1_client.erl @@ -154,7 +154,7 @@ list_files(Host, TcpPort, EpochID) when is_integer(TcpPort) -> %% @doc Get the latest epoch number from the FLU's projection store. -spec get_latest_epoch(port(), projection_type()) -> - {ok, -1|non_neg_integer()} | {error, term()}. + {ok, epoch_id()} | {error, term()}. get_latest_epoch(Sock, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> get_latest_epoch2(Sock, ProjType). @@ -163,7 +163,7 @@ get_latest_epoch(Sock, ProjType) -spec get_latest_epoch(inet_host(), inet_port(), projection_type()) -> - {ok, -1|non_neg_integer()} | {error, term()}. + {ok, epoch_id()} | {error, term()}. get_latest_epoch(Host, TcpPort, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> Sock = machi_util:connect(Host, TcpPort), diff --git a/src/machi_projection_store.erl b/src/machi_projection_store.erl index c88a21b..d53ecc4 100644 --- a/src/machi_projection_store.erl +++ b/src/machi_projection_store.erl @@ -42,8 +42,8 @@ private_dir = "" :: string(), wedged = true :: boolean(), wedge_notify_pid :: pid() | atom(), - max_public_epoch = -1 :: -1 | non_neg_integer(), - max_private_epoch = -1 :: -1 | non_neg_integer() + max_public_epoch = {-1,<<>>} :: -1 | non_neg_integer(), + max_private_epoch = {-1,<<>>} :: -1 | non_neg_integer() }). 
start_link(RegName, DataDir, NotifyWedgeStateChanges) -> @@ -124,16 +124,16 @@ init([DataDir, NotifyWedgeStateChanges]) -> handle_call({{get_latest_epoch, ProjType}, LC1}, _From, S) -> LC2 = lclock_update(LC1), - Epoch = if ProjType == public -> S#state.max_public_epoch; - ProjType == private -> S#state.max_private_epoch - end, - {reply, {{ok, Epoch}, LC2}, S}; + EpochT = if ProjType == public -> S#state.max_public_epoch; + ProjType == private -> S#state.max_private_epoch + end, + {reply, {{ok, EpochT}, LC2}, S}; handle_call({{read_latest_projection, ProjType}, LC1}, _From, S) -> LC2 = lclock_update(LC1), - Epoch = if ProjType == public -> S#state.max_public_epoch; - ProjType == private -> S#state.max_private_epoch + {EpochNum, _CSum} = if ProjType == public -> S#state.max_public_epoch; + ProjType == private -> S#state.max_private_epoch end, - {Reply, NewS} = do_proj_read(ProjType, Epoch, S), + {Reply, NewS} = do_proj_read(ProjType, EpochNum, S), {reply, {Reply, LC2}, NewS}; handle_call({{read, ProjType, Epoch}, LC1}, _From, S) -> LC2 = lclock_update(LC1), @@ -176,17 +176,21 @@ code_change(_OldVsn, S, _Extra) -> do_proj_read(_ProjType, Epoch, S) when Epoch < 0 -> {{error, not_written}, S}; -do_proj_read(ProjType, Epoch, S) -> - Dir = pick_path(ProjType, S), +do_proj_read(ProjType, Epoch, S_or_Dir) -> + Dir = if is_record(S_or_Dir, state) -> + pick_path(ProjType, S_or_Dir); + is_list(S_or_Dir) -> + S_or_Dir + end, Path = filename:join(Dir, epoch2name(Epoch)), case file:read_file(Path) of {ok, Bin} -> %% TODO and if Bin is corrupt? (even if binary_to_term() succeeds) - {{ok, binary_to_term(Bin)}, S}; + {{ok, binary_to_term(Bin)}, S_or_Dir}; {error, enoent} -> - {{error, not_written}, S}; + {{error, not_written}, S_or_Dir}; {error, Else} -> - {{error, Else}, S} + {{error, Else}, S_or_Dir} end. do_proj_write(ProjType, #projection_v1{epoch_number=Epoch}=Proj, S) -> @@ -201,12 +205,15 @@ do_proj_write(ProjType, #projection_v1{epoch_number=Epoch}=Proj, S) -> ok = file:write(FH, term_to_binary(Proj)), ok = file:sync(FH), ok = file:close(FH), - NewS = if ProjType == public, Epoch > S#state.max_public_epoch -> + EpochT = {Epoch, Proj}, + NewS = if ProjType == public, + Epoch > element(1, S#state.max_public_epoch) -> io:format(user, "TODO: tell ~p we are wedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), - S#state{max_public_epoch=Epoch, wedged=true}; - ProjType == private, Epoch > S#state.max_private_epoch -> + S#state{max_public_epoch=EpochT, wedged=true}; + ProjType == private, + Epoch > element(1, S#state.max_private_epoch) -> io:format(user, "TODO: tell ~p we are unwedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), - S#state{max_private_epoch=Epoch, wedged=false}; + S#state{max_private_epoch=EpochT, wedged=false}; true -> S end, @@ -233,9 +240,11 @@ find_all(Dir) -> find_max_epoch(Dir) -> Fs = lists:sort(filelib:wildcard("*", Dir)), if Fs == [] -> - -1; + {-1, <<>>}; true -> - name2epoch(lists:last(Fs)) + EpochNum = name2epoch(lists:last(Fs)), + {{ok, Proj}, _} = do_proj_read(proj_type_ignored, EpochNum, Dir), + {EpochNum, Proj} end. 
%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/test/machi_flu1_test.erl b/test/machi_flu1_test.erl index 136d6d0..c37188c 100644 --- a/test/machi_flu1_test.erl +++ b/test/machi_flu1_test.erl @@ -125,7 +125,7 @@ flu_projection_smoke_test() -> FLU1 = setup_test_flu(projection_test_flu, TcpPort, DataDir), try [begin - {ok, -1} = ?FLU_C:get_latest_epoch(Host, TcpPort, T), + {ok, {-1,_}} = ?FLU_C:get_latest_epoch(Host, TcpPort, T), {error, not_written} = ?FLU_C:read_latest_projection(Host, TcpPort, T), {ok, []} = ?FLU_C:list_all(Host, TcpPort, T), @@ -135,6 +135,7 @@ flu_projection_smoke_test() -> ok = ?FLU_C:write_projection(Host, TcpPort, T, P1), {error, written} = ?FLU_C:write_projection(Host, TcpPort, T, P1), {ok, P1} = ?FLU_C:read_projection(Host, TcpPort, T, 1), + {ok, {1,_}} = ?FLU_C:get_latest_epoch(Host, TcpPort, T), {ok, P1} = ?FLU_C:read_latest_projection(Host, TcpPort, T), {ok, [1]} = ?FLU_C:list_all(Host, TcpPort, T), {ok, [P1]} = ?FLU_C:get_all(Host, TcpPort, T), From 1d63b93fc00e0d62e9cf51528ce858f6ff059e27 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Mon, 6 Apr 2015 16:49:17 +0900 Subject: [PATCH 03/22] Kill append, projection, and listen pids on machi_flu1:stop() --- src/machi_flu1.erl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/machi_flu1.erl b/src/machi_flu1.erl index 02f7925..3d71ce4 100644 --- a/src/machi_flu1.erl +++ b/src/machi_flu1.erl @@ -46,7 +46,7 @@ start_link([{FluName, TcpPort, DataDir}|Rest]) stop(Pid) -> case erlang:is_process_alive(Pid) of true -> - Pid ! forever, + Pid ! killme, ok; false -> error @@ -86,7 +86,11 @@ main2(RegName, TcpPort, DataDir, Rest) -> put(flu_append_pid, AppendPid), put(flu_projection_pid, ProjectionPid), put(flu_listen_pid, ListenPid), - receive forever -> ok end. + receive killme -> ok end, + (catch exit(AppendPid, kill)), + (catch exit(ProjectionPid, kill)), + (catch exit(ListenPid, kill)), + ok. start_listen_server(S) -> spawn_link(fun() -> run_listen_server(S) end). 
From 16e283fe5b6b9d6b1c4bc055cab417730960c3a5 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Mon, 6 Apr 2015 18:43:52 +0900 Subject: [PATCH 04/22] API overhaul, add machi_proxy_flu1_client.erl, add chain manager (tests commented out) --- src/machi_flu1.erl | 8 +- src/machi_flu1_client.erl | 136 +++++++----- src/machi_projection_store.erl | 34 +-- src/machi_proxy_flu1_client.erl | 344 +++++++++++++++++++++++++++++ src/machi_util.erl | 18 +- test/machi_chain_manager1_test.erl | 4 +- test/machi_flu1_test.erl | 15 +- 7 files changed, 472 insertions(+), 87 deletions(-) create mode 100644 src/machi_proxy_flu1_client.erl diff --git a/src/machi_flu1.erl b/src/machi_flu1.erl index 3d71ce4..bd34ff5 100644 --- a/src/machi_flu1.erl +++ b/src/machi_flu1.erl @@ -580,12 +580,12 @@ handle_projection_command({read_projection, ProjType, Epoch}, handle_projection_command({write_projection, ProjType, Proj}, #state{proj_store=ProjStore}) -> machi_projection_store:write(ProjStore, ProjType, Proj); -handle_projection_command({get_all, ProjType}, +handle_projection_command({get_all_projections, ProjType}, #state{proj_store=ProjStore}) -> - machi_projection_store:get_all(ProjStore, ProjType); -handle_projection_command({list_all, ProjType}, + machi_projection_store:get_all_projections(ProjStore, ProjType); +handle_projection_command({list_all_projections, ProjType}, #state{proj_store=ProjStore}) -> - machi_projection_store:list_all(ProjStore, ProjType); + machi_projection_store:list_all_projections(ProjStore, ProjType); handle_projection_command(Else, _S) -> {error, unknown_cmd, Else}. diff --git a/src/machi_flu1_client.erl b/src/machi_flu1_client.erl index 8850a0c..570c9fa 100644 --- a/src/machi_flu1_client.erl +++ b/src/machi_flu1_client.erl @@ -35,8 +35,8 @@ read_latest_projection/2, read_latest_projection/3, read_projection/3, read_projection/4, write_projection/3, write_projection/4, - get_all/2, get_all/3, - list_all/2, list_all/3, + get_all_projections/2, get_all_projections/3, + list_all_projections/2, list_all_projections/3, %% Common API quit/1 @@ -54,7 +54,7 @@ -type chunk_pos() :: {file_offset(), chunk_size(), file_name_s()}. -type chunk_size() :: non_neg_integer(). -type epoch_csum() :: binary(). --type epoch_num() :: non_neg_integer(). +-type epoch_num() :: -1 | non_neg_integer(). -type epoch_id() :: {epoch_num(), epoch_csum()}. -type file_info() :: {file_size(), file_name_s()}. -type file_name() :: binary() | list(). @@ -243,44 +243,44 @@ write_projection(Host, TcpPort, ProjType, Proj) %% @doc Get all projections from the FLU's projection store. --spec get_all(port(), projection_type()) -> +-spec get_all_projections(port(), projection_type()) -> {ok, [projection()]} | {error, term()}. -get_all(Sock, ProjType) +get_all_projections(Sock, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> - get_all2(Sock, ProjType). + get_all_projections2(Sock, ProjType). %% @doc Get all projections from the FLU's projection store. --spec get_all(inet_host(), inet_port(), +-spec get_all_projections(inet_host(), inet_port(), projection_type()) -> {ok, [projection()]} | {error, term()}. -get_all(Host, TcpPort, ProjType) +get_all_projections(Host, TcpPort, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> Sock = machi_util:connect(Host, TcpPort), try - get_all2(Sock, ProjType) + get_all_projections2(Sock, ProjType) after catch gen_tcp:close(Sock) end. %% @doc Get all epoch numbers from the FLU's projection store. 
--spec list_all(port(), projection_type()) -> +-spec list_all_projections(port(), projection_type()) -> {ok, [non_neg_integer()]} | {error, term()}. -list_all(Sock, ProjType) +list_all_projections(Sock, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> - list_all2(Sock, ProjType). + list_all_projections2(Sock, ProjType). %% @doc Get all epoch numbers from the FLU's projection store. --spec list_all(inet_host(), inet_port(), +-spec list_all_projections(inet_host(), inet_port(), projection_type()) -> {ok, [non_neg_integer()]} | {error, term()}. -list_all(Host, TcpPort, ProjType) +list_all_projections(Host, TcpPort, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> Sock = machi_util:connect(Host, TcpPort), try - list_all2(Sock, ProjType) + list_all_projections2(Sock, ProjType) after catch gen_tcp:close(Sock) end. @@ -365,6 +365,7 @@ trunc_hack(Host, TcpPort, EpochID, File) when is_integer(TcpPort) -> %%%%%%%%%%%%%%%%%%%%%%%%%%% append_chunk2(Sock, EpochID, Prefix0, Chunk0) -> + erase(bad_sock), try %% TODO: add client-side checksum to the server's protocol %% _ = crypto:hash(md5, Chunk), @@ -391,47 +392,59 @@ append_chunk2(Sock, EpochID, Prefix0, Chunk0) -> end catch throw:Error -> + put(bad_sock, Sock), Error; error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), {error, {badmatch, BadMatch, erlang:get_stacktrace()}} end. read_chunk2(Sock, EpochID, File0, Offset, Size) -> - {EpochNum, EpochCSum} = EpochID, - EpochIDRaw = <>, - File = machi_util:make_binary(File0), - PrefixHex = machi_util:int_to_hexbin(Offset, 64), - SizeHex = machi_util:int_to_hexbin(Size, 32), - CmdLF = [$R, 32, EpochIDRaw, PrefixHex, SizeHex, File, 10], - ok = gen_tcp:send(Sock, CmdLF), - case gen_tcp:recv(Sock, 3) of - {ok, <<"OK\n">>} -> - {ok, _Chunk}=Res = gen_tcp:recv(Sock, Size), - Res; - {ok, Else} -> - {ok, OldOpts} = inet:getopts(Sock, [packet]), - ok = inet:setopts(Sock, [{packet, line}]), - {ok, Else2} = gen_tcp:recv(Sock, 0), - ok = inet:setopts(Sock, OldOpts), - case Else of - <<"ERA">> -> - {error, todo_erasure_coded}; %% escript_cc_parse_ec_info(Sock, Line, Else2); - <<"ERR">> -> - case Else2 of - <<"OR BAD-IO\n">> -> - {error, no_such_file}; - <<"OR NOT-ERASURE\n">> -> - {error, no_such_file}; - <<"OR BAD-ARG\n">> -> - {error, bad_arg}; - <<"OR PARTIAL-READ\n">> -> - {error, partial_read}; - _ -> - {error, Else2} - end; - _ -> - {error, {whaaa, <>}} - end + erase(bad_sock), + try + {EpochNum, EpochCSum} = EpochID, + EpochIDRaw = <>, + File = machi_util:make_binary(File0), + PrefixHex = machi_util:int_to_hexbin(Offset, 64), + SizeHex = machi_util:int_to_hexbin(Size, 32), + CmdLF = [$R, 32, EpochIDRaw, PrefixHex, SizeHex, File, 10], + ok = gen_tcp:send(Sock, CmdLF), + case gen_tcp:recv(Sock, 3) of + {ok, <<"OK\n">>} -> + {ok, _Chunk}=Res = gen_tcp:recv(Sock, Size), + Res; + {ok, Else} -> + {ok, OldOpts} = inet:getopts(Sock, [packet]), + ok = inet:setopts(Sock, [{packet, line}]), + {ok, Else2} = gen_tcp:recv(Sock, 0), + ok = inet:setopts(Sock, OldOpts), + case Else of + <<"ERA">> -> + {error, todo_erasure_coded}; %% escript_cc_parse_ec_info(Sock, Line, Else2); + <<"ERR">> -> + case Else2 of + <<"OR BAD-IO\n">> -> + {error, no_such_file}; + <<"OR NOT-ERASURE\n">> -> + {error, no_such_file}; + <<"OR BAD-ARG\n">> -> + {error, bad_arg}; + <<"OR PARTIAL-READ\n">> -> + {error, partial_read}; + _ -> + {error, Else2} + end; + _ -> + {error, {whaaa_todo, <>}} + end + end + catch + throw:Error -> + put(bad_sock, Sock), + Error; + error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), 
+ {error, {badmatch, BadMatch, erlang:get_stacktrace()}} end. list2(Sock, EpochID) -> @@ -462,6 +475,7 @@ list3(Else, _Sock) -> throw({server_protocol_error, Else}). checksum_list2(Sock, EpochID, File) -> + erase(bad_sock), try {EpochNum, EpochCSum} = EpochID, EpochIDRaw = <>, @@ -484,8 +498,10 @@ checksum_list2(Sock, EpochID, File) -> end catch throw:Error -> + put(bad_sock, Sock), Error; error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), {error, {badmatch, BadMatch}} end. @@ -515,6 +531,7 @@ checksum_list_finish(Chunks) -> Line /= <<>>]. write_chunk2(Sock, EpochID, File0, Offset, Chunk0) -> + erase(bad_sock), try {EpochNum, EpochCSum} = EpochID, EpochIDRaw = <>, @@ -542,12 +559,15 @@ write_chunk2(Sock, EpochID, File0, Offset, Chunk0) -> end catch throw:Error -> + put(bad_sock, Sock), Error; error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), {error, {badmatch, BadMatch, erlang:get_stacktrace()}} end. delete_migration2(Sock, EpochID, File) -> + erase(bad_sock), try {EpochNum, EpochCSum} = EpochID, EpochIDRaw = <>, @@ -566,12 +586,15 @@ delete_migration2(Sock, EpochID, File) -> end catch throw:Error -> + put(bad_sock, Sock), Error; error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), {error, {badmatch, BadMatch}} end. trunc_hack2(Sock, EpochID, File) -> + erase(bad_sock), try {EpochNum, EpochCSum} = EpochID, EpochIDRaw = <>, @@ -590,8 +613,10 @@ trunc_hack2(Sock, EpochID, File) -> end catch throw:Error -> + put(bad_sock, Sock), Error; error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), {error, {badmatch, BadMatch}} end. @@ -611,15 +636,16 @@ write_projection2(Sock, ProjType, Proj) -> ProjCmd = {write_projection, ProjType, Proj}, do_projection_common(Sock, ProjCmd). -get_all2(Sock, ProjType) -> - ProjCmd = {get_all, ProjType}, +get_all_projections2(Sock, ProjType) -> + ProjCmd = {get_all_projections, ProjType}, do_projection_common(Sock, ProjCmd). -list_all2(Sock, ProjType) -> - ProjCmd = {list_all, ProjType}, +list_all_projections2(Sock, ProjType) -> + ProjCmd = {list_all_projections, ProjType}, do_projection_common(Sock, ProjCmd). do_projection_common(Sock, ProjCmd) -> + erase(bad_sock), try ProjCmdBin = term_to_binary(ProjCmd), Len = iolist_size(ProjCmdBin), @@ -641,7 +667,9 @@ do_projection_common(Sock, ProjCmd) -> end catch throw:Error -> + put(bad_sock, Sock), Error; error:{badmatch,_}=BadMatch -> + put(bad_sock, Sock), {error, {badmatch, BadMatch, erlang:get_stacktrace()}} end. diff --git a/src/machi_projection_store.erl b/src/machi_projection_store.erl index d53ecc4..09555d2 100644 --- a/src/machi_projection_store.erl +++ b/src/machi_projection_store.erl @@ -29,21 +29,23 @@ read_latest_projection/2, read_latest_projection/3, read/3, read/4, write/3, write/4, - get_all/2, get_all/3, - list_all/2, list_all/3 + get_all_projections/2, get_all_projections/3, + list_all_projections/2, list_all_projections/3 ]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). +-define(NO_EPOCH, {-1,<<0:(20*8)/big>>}). + -record(state, { public_dir = "" :: string(), private_dir = "" :: string(), wedged = true :: boolean(), wedge_notify_pid :: pid() | atom(), - max_public_epoch = {-1,<<>>} :: -1 | non_neg_integer(), - max_private_epoch = {-1,<<>>} :: -1 | non_neg_integer() + max_public_epoch = ?NO_EPOCH :: {-1 | non_neg_integer(), binary()}, + max_private_epoch = ?NO_EPOCH :: {-1 | non_neg_integer(), binary()} }). 
start_link(RegName, DataDir, NotifyWedgeStateChanges) -> @@ -82,19 +84,19 @@ write(PidSpec, ProjType, Proj, Timeout) Proj#projection_v1.epoch_number >= 0 -> g_call(PidSpec, {write, ProjType, Proj}, Timeout). -get_all(PidSpec, ProjType) -> - get_all(PidSpec, ProjType, infinity). +get_all_projections(PidSpec, ProjType) -> + get_all_projections(PidSpec, ProjType, infinity). -get_all(PidSpec, ProjType, Timeout) +get_all_projections(PidSpec, ProjType, Timeout) when ProjType == 'public' orelse ProjType == 'private' -> - g_call(PidSpec, {get_all, ProjType}, Timeout). + g_call(PidSpec, {get_all_projections, ProjType}, Timeout). -list_all(PidSpec, ProjType) -> - list_all(PidSpec, ProjType, infinity). +list_all_projections(PidSpec, ProjType) -> + list_all_projections(PidSpec, ProjType, infinity). -list_all(PidSpec, ProjType, Timeout) +list_all_projections(PidSpec, ProjType, Timeout) when ProjType == 'public' orelse ProjType == 'private' -> - g_call(PidSpec, {list_all, ProjType}, Timeout). + g_call(PidSpec, {list_all_projections, ProjType}, Timeout). %%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -143,7 +145,7 @@ handle_call({{write, ProjType, Proj}, LC1}, _From, S) -> LC2 = lclock_update(LC1), {Reply, NewS} = do_proj_write(ProjType, Proj, S), {reply, {Reply, LC2}, NewS}; -handle_call({{get_all, ProjType}, LC1}, _From, S) -> +handle_call({{get_all_projections, ProjType}, LC1}, _From, S) -> LC2 = lclock_update(LC1), Dir = pick_path(ProjType, S), Epochs = find_all(Dir), @@ -152,7 +154,7 @@ handle_call({{get_all, ProjType}, LC1}, _From, S) -> Proj end || Epoch <- Epochs], {reply, {{ok, All}, LC2}, S}; -handle_call({{list_all, ProjType}, LC1}, _From, S) -> +handle_call({{list_all_projections, ProjType}, LC1}, _From, S) -> LC2 = lclock_update(LC1), Dir = pick_path(ProjType, S), {reply, {{ok, find_all(Dir)}, LC2}, S}; @@ -205,7 +207,7 @@ do_proj_write(ProjType, #projection_v1{epoch_number=Epoch}=Proj, S) -> ok = file:write(FH, term_to_binary(Proj)), ok = file:sync(FH), ok = file:close(FH), - EpochT = {Epoch, Proj}, + EpochT = {Epoch, Proj#projection_v1.epoch_csum}, NewS = if ProjType == public, Epoch > element(1, S#state.max_public_epoch) -> io:format(user, "TODO: tell ~p we are wedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), @@ -240,7 +242,7 @@ find_all(Dir) -> find_max_epoch(Dir) -> Fs = lists:sort(filelib:wildcard("*", Dir)), if Fs == [] -> - {-1, <<>>}; + ?NO_EPOCH; true -> EpochNum = name2epoch(lists:last(Fs)), {{ok, Proj}, _} = do_proj_read(proj_type_ignored, EpochNum, Dir), diff --git a/src/machi_proxy_flu1_client.erl b/src/machi_proxy_flu1_client.erl new file mode 100644 index 0000000..5222fb9 --- /dev/null +++ b/src/machi_proxy_flu1_client.erl @@ -0,0 +1,344 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-module(machi_proxy_flu1_client). 
+ +-behaviour(gen_server). + +-include("machi.hrl"). +-include("machi_projection.hrl"). + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. % TEST. + +-export([start_link/1]). +%% FLU1 API +-export([ + %% File API + append_chunk/4, append_chunk/5, + read_chunk/5, read_chunk/6, + checksum_list/3, checksum_list/4, + list_files/2, list_files/3, + + %% %% Projection API + get_latest_epoch/2, get_latest_epoch/3, + read_latest_projection/2, read_latest_projection/3, + read_projection/3, read_projection/4, + write_projection/3, write_projection/4, + get_all_projections/2, get_all_projections/3, + list_all_projections/2, list_all_projections/3, + + %% Common API + quit/1 + ]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +-define(FLU_C, machi_flu1_client). + +-record(state, { + i :: #p_srvr{}, + sock :: 'undefined' | port() + }). + +start_link(#p_srvr{}=I) -> + gen_server:start_link(?MODULE, [I], []). + +append_chunk(PidSpec, EpochID, Prefix, Chunk) -> + append_chunk(PidSpec, EpochID, Prefix, Chunk, infinity). + +append_chunk(PidSpec, EpochID, Prefix, Chunk, Timeout) -> + gen_server:call(PidSpec, {req, {append_chunk, EpochID, Prefix, Chunk}}, + Timeout). + +read_chunk(PidSpec, EpochID, File, Offset, Size) -> + read_chunk(PidSpec, EpochID, File, Offset, Size, infinity). + +read_chunk(PidSpec, EpochID, File, Offset, Size, Timeout) -> + gen_server:call(PidSpec, {req, {read_chunk, EpochID, File, Offset, Size}}, + Timeout). + +checksum_list(PidSpec, EpochID, File) -> + checksum_list(PidSpec, EpochID, File, infinity). + +checksum_list(PidSpec, EpochID, File, Timeout) -> + gen_server:call(PidSpec, {req, {checksum_list, EpochID, File}}, + Timeout). + +list_files(PidSpec, EpochID) -> + list_files(PidSpec, EpochID, infinity). + +list_files(PidSpec, EpochID, Timeout) -> + gen_server:call(PidSpec, {req, {list_files, EpochID}}, + Timeout). + +get_latest_epoch(PidSpec, ProjType) -> + get_latest_epoch(PidSpec, ProjType, infinity). + +get_latest_epoch(PidSpec, ProjType, Timeout) -> + gen_server:call(PidSpec, {req, {get_latest_epoch, ProjType}}, + Timeout). + +read_latest_projection(PidSpec, ProjType) -> + read_latest_projection(PidSpec, ProjType, infinity). + +read_latest_projection(PidSpec, ProjType, Timeout) -> + gen_server:call(PidSpec, {req, {read_latest_projection, ProjType}}, + Timeout). + +read_projection(PidSpec, ProjType, Epoch) -> + read_projection(PidSpec, ProjType, Epoch, infinity). + +read_projection(PidSpec, ProjType, Epoch, Timeout) -> + gen_server:call(PidSpec, {req, {read_projection, ProjType, Epoch}}, + Timeout). + +write_projection(PidSpec, ProjType, Proj) -> + write_projection(PidSpec, ProjType, Proj, infinity). + +write_projection(PidSpec, ProjType, Proj, Timeout) -> + gen_server:call(PidSpec, {req, {write_projection, ProjType, Proj}}, + Timeout). + +get_all_projections(PidSpec, ProjType) -> + get_all_projections(PidSpec, ProjType, infinity). + +get_all_projections(PidSpec, ProjType, Timeout) -> + gen_server:call(PidSpec, {req, {get_all_projections, ProjType}}, + Timeout). + +list_all_projections(PidSpec, ProjType) -> + list_all_projections(PidSpec, ProjType, infinity). + +list_all_projections(PidSpec, ProjType, Timeout) -> + gen_server:call(PidSpec, {req, {list_all_projections, ProjType}}, + Timeout). + +quit(PidSpec) -> + gen_server:call(PidSpec, quit, infinity). + +%%%%%%%%%%%%%%%%%%%%%%%%%%% + +init([I]) -> + S0 = #state{i=I}, + S1 = try_connect(S0), + {ok, S1}. 
+ +handle_call({req, Req}, _From, S) -> + {Reply, NewS} = do_req(Req, S), + {reply, Reply, NewS}; +handle_call(quit, _From, S) -> + {stop, normal, ok, disconnect(S)}; +handle_call(_Request, _From, S) -> + Reply = ok, + {reply, Reply, S}. + +handle_cast(_Msg, S) -> + {noreply, S}. + +handle_info(_Info, S) -> + {noreply, S}. + +terminate(_Reason, _S) -> + ok. + +code_change(_OldVsn, S, _Extra) -> + {ok, S}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%% + +do_req(Req, S) -> + S2 = try_connect(S), + Fun = make_req_fun(Req, S2), + case connected_p(S2) of + true -> + case Fun() of + T when element(1, T) == ok -> + {T, S2}; + Else -> + case get(bad_sock) of + Bad when Bad == S2#state.sock -> + {Else, disconnect(S2)}; + _ -> + {Else, S2} + end + end; + false -> + {{error, not_connected}, S2} + end. + +make_req_fun({append_chunk, EpochID, Prefix, Chunk}, #state{sock=Sock}) -> + fun() -> ?FLU_C:append_chunk(Sock, EpochID, Prefix, Chunk) end; +make_req_fun({read_chunk, EpochID, File, Offset, Size}, #state{sock=Sock}) -> + fun() -> ?FLU_C:read_chunk(Sock, EpochID, File, Offset, Size) end; +make_req_fun({checksum_list, EpochID, File}, #state{sock=Sock}) -> + fun() -> ?FLU_C:checksum_list(Sock, EpochID, File) end; +make_req_fun({list_files, EpochID}, #state{sock=Sock}) -> + fun() -> ?FLU_C:list_files(Sock, EpochID) end; +make_req_fun({get_latest_epoch, ProjType}, #state{sock=Sock}) -> + fun() -> ?FLU_C:get_latest_epoch(Sock, ProjType) end; +make_req_fun({read_latest_projection, ProjType}, #state{sock=Sock}) -> + fun() -> ?FLU_C:read_latest_projection(Sock, ProjType) end; +make_req_fun({read_projection, ProjType, Epoch}, #state{sock=Sock}) -> + fun() -> ?FLU_C:read_projection(Sock, ProjType, Epoch) end; +make_req_fun({write_projection, ProjType, Proj}, #state{sock=Sock}) -> + fun() -> ?FLU_C:write_projection(Sock, ProjType, Proj) end; +make_req_fun({get_all_projections, ProjType}, #state{sock=Sock}) -> + fun() -> ?FLU_C:get_all_projections(Sock, ProjType) end; +make_req_fun({list_all_projections, ProjType}, #state{sock=Sock}) -> + fun() -> ?FLU_C:list_all_projections(Sock, ProjType) end. + +connected_p(#state{sock=SockMaybe, + i=#p_srvr{proto=ipv4}=_I}=_S) -> + is_port(SockMaybe); +connected_p(#state{i=#p_srvr{proto=disterl, + name=_NodeName}=_I}=_S) -> + true. + %% case net_adm:ping(NodeName) of + %% ping -> + %% true; + %% _ -> + %% false + %% end. + +try_connect(#state{sock=undefined, + i=#p_srvr{proto=ipv4, address=Host, port=TcpPort}=_I}=S) -> + try + Sock = machi_util:connect(Host, TcpPort), + S#state{sock=Sock} + catch + _:_ -> + S + end; +try_connect(S) -> + %% If we're connection-based, we're already connected. + %% If we're not connection-based, then there's nothing to do. + S. + +disconnect(#state{sock=Sock, + i=#p_srvr{proto=ipv4}=_I}=S) -> + (catch gen_tcp:close(Sock)), + S#state{sock=undefined}; +disconnect(S) -> + S. + +%%%%%%%%%%%%%%%%%%%%%%%%%%% + +-ifdef(TEST). + +dummy_server(Parent, TcpPort) -> + spawn_link(fun() -> + {ok, LSock} = gen_tcp:listen(TcpPort, + [{reuseaddr,true}, + {packet, line}, + {mode, binary}, + {active, false}]), + dummy_ack(Parent), + {ok, Sock} = gen_tcp:accept(LSock), + ok = inet:setopts(Sock, [{packet, line}]), + {ok, _Line} = gen_tcp:recv(Sock, 0), + ok = gen_tcp:send(Sock, "ERROR BADARG\n"), + (catch gen_tcp:close(Sock)), + unlink(Parent), + exit(normal) + end). + +dummy_ack(Parent) -> + Parent ! go. + +dummy_wait_for_ack() -> + receive go -> ok end. 
+ +smoke_test() -> + TcpPort = 57123, + Me = self(), + _ServerPid = dummy_server(Me, TcpPort), + dummy_wait_for_ack(), + + I = #p_srvr{name=smoke, proto=ipv4, address="localhost", port=TcpPort}, + S0 = #state{i=I}, + false = connected_p(S0), + S1 = try_connect(S0), + true = connected_p(S1), + gen_tcp:send(S1#state.sock, "yo dawg\n"), + {ok, _Answer} = gen_tcp:recv(S1#state.sock, 0), + _S2 = disconnect(S1), + + ok. + +api_smoke_test() -> + RegName = api_smoke_flu, + Host = "localhost", + TcpPort = 57124, + DataDir = "./data.api_smoke_flu", + FLU1 = machi_flu1_test:setup_test_flu(RegName, TcpPort, DataDir), + erase(flu_pid), + + try + I = #p_srvr{name=RegName, proto=ipv4, address=Host, port=TcpPort}, + {ok, Prox1} = start_link(I), + try + FakeEpoch = {-1, <<0:(20*8)/big>>}, + [{ok, {_,_,_}} = append_chunk(Prox1, + FakeEpoch, <<"prefix">>, <<"data">>, + infinity) || _ <- lists:seq(1,5)], + %% Stop the FLU, what happens? + machi_flu1:stop(FLU1), + {error,_} = append_chunk(Prox1, + FakeEpoch, <<"prefix">>, <<"data">>, + infinity), + {error,not_connected} = append_chunk(Prox1, + FakeEpoch, <<"prefix">>, <<"data">>, + infinity), + %% Start the FLU again, we should be able to do stuff immediately + FLU1b = machi_flu1_test:setup_test_flu(RegName, TcpPort, DataDir, + [save_data_dir]), + put(flu_pid, FLU1b), + MyChunk = <<"my chunk data">>, + {ok, {MyOff,MySize,MyFile}} = + append_chunk(Prox1, FakeEpoch, <<"prefix">>, MyChunk, + infinity), + {ok, MyChunk} = read_chunk(Prox1, FakeEpoch, MyFile, MyOff, MySize), + + %% Alright, now for the rest of the API, whee + BadFile = <<"no-such-file">>, + {error, no_such_file} = checksum_list(Prox1, FakeEpoch, BadFile), + {ok, [_]} = list_files(Prox1, FakeEpoch), + {ok, FakeEpoch} = get_latest_epoch(Prox1, public), + {error, not_written} = read_latest_projection(Prox1, public), + {error, not_written} = read_projection(Prox1, public, 44), + P1 = machi_projection:new(1, a, [a], [], [a], [], []), + ok = write_projection(Prox1, public, P1), + {ok, P1} = read_projection(Prox1, public, 1), + {ok, [P1]} = get_all_projections(Prox1, public), + {ok, [1]} = list_all_projections(Prox1, public), + ok + after + _ = (catch quit(Prox1)) + end + after + (catch machi_flu1:stop(FLU1)), + (catch machi_flu1:stop(get(flu_pid))) + end. + +-endif. % TEST diff --git a/src/machi_util.erl b/src/machi_util.erl index 1331d11..af0ac29 100644 --- a/src/machi_util.erl +++ b/src/machi_util.erl @@ -31,7 +31,7 @@ read_max_filenum/2, increment_max_filenum/2, info_msg/2, verb/1, verb/2, %% TCP protocol helpers - connect/2 + connect/2, connect/3 ]). -compile(export_all). @@ -168,13 +168,19 @@ info_msg(Fmt, Args) -> -spec connect(inet:ip_address() | inet:hostname(), inet:port_number()) -> port(). connect(Host, Port) -> - escript_connect(Host, Port). + escript_connect(Host, Port, 4500). -escript_connect(Host, PortStr) when is_list(PortStr) -> +-spec connect(inet:ip_address() | inet:hostname(), inet:port_number(), + timeout()) -> + port(). +connect(Host, Port, Timeout) -> + escript_connect(Host, Port, Timeout). + +escript_connect(Host, PortStr, Timeout) when is_list(PortStr) -> Port = list_to_integer(PortStr), - escript_connect(Host, Port); -escript_connect(Host, Port) when is_integer(Port) -> + escript_connect(Host, Port, Timeout); +escript_connect(Host, Port, Timeout) when is_integer(Port) -> {ok, Sock} = gen_tcp:connect(Host, Port, [{active,false}, {mode,binary}, - {packet, raw}]), + {packet, raw}], Timeout), Sock. 
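The new connect/3 variant simply lets callers bound how long the TCP
connect may block (connect/2 now delegates to it with a 4500 ms
default).  A small usage sketch; the host, port, and 1000 ms timeout
below are illustrative values only:

    %% Connect to a FLU's TCP port with an explicit connect timeout;
    %% the {ok, Sock} match inside escript_connect/3 fails (badmatch)
    %% if the connect does not complete within the timeout.
    Sock = machi_util:connect("localhost", 57124, 1000),
    %% ... use Sock with machi_flu1_client calls ...
    ok = gen_tcp:close(Sock).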
diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index def16c7..5f4367a 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -133,13 +133,13 @@ chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> -ifndef(PULSE). -smoke0_test() -> +smoke0_testXXX() -> {ok, _} = machi_partition_simulator:start_link({1,2,3}, 50, 50), Host = "localhost", TcpPort = 6623, {ok, FLUa} = machi_flu1:start_link([{a,TcpPort,"./data.a"}]), {ok, M0} = ?MGR:start_link(a, [a,b,c], a), - SockA = machi_util:connect(Host, TcpPort), + _SockA = machi_util:connect(Host, TcpPort), try pong = ?MGR:ping(M0) after diff --git a/test/machi_flu1_test.erl b/test/machi_flu1_test.erl index c37188c..fbfc0ae 100644 --- a/test/machi_flu1_test.erl +++ b/test/machi_flu1_test.erl @@ -33,7 +33,12 @@ setup_test_flu(RegName, TcpPort, DataDir) -> setup_test_flu(RegName, TcpPort, DataDir, []). setup_test_flu(RegName, TcpPort, DataDir, DbgProps) -> - clean_up_data_dir(DataDir), + case proplists:get_value(save_data_dir, DbgProps) of + true -> + ok; + _ -> + clean_up_data_dir(DataDir) + end, {ok, FLU1} = ?FLU:start_link([{RegName, TcpPort, DataDir}, {dbg, DbgProps}]), @@ -128,8 +133,8 @@ flu_projection_smoke_test() -> {ok, {-1,_}} = ?FLU_C:get_latest_epoch(Host, TcpPort, T), {error, not_written} = ?FLU_C:read_latest_projection(Host, TcpPort, T), - {ok, []} = ?FLU_C:list_all(Host, TcpPort, T), - {ok, []} = ?FLU_C:get_all(Host, TcpPort, T), + {ok, []} = ?FLU_C:list_all_projections(Host, TcpPort, T), + {ok, []} = ?FLU_C:get_all_projections(Host, TcpPort, T), P1 = machi_projection:new(1, a, [a], [], [a], [], []), ok = ?FLU_C:write_projection(Host, TcpPort, T, P1), @@ -137,8 +142,8 @@ flu_projection_smoke_test() -> {ok, P1} = ?FLU_C:read_projection(Host, TcpPort, T, 1), {ok, {1,_}} = ?FLU_C:get_latest_epoch(Host, TcpPort, T), {ok, P1} = ?FLU_C:read_latest_projection(Host, TcpPort, T), - {ok, [1]} = ?FLU_C:list_all(Host, TcpPort, T), - {ok, [P1]} = ?FLU_C:get_all(Host, TcpPort, T), + {ok, [1]} = ?FLU_C:list_all_projections(Host, TcpPort, T), + {ok, [P1]} = ?FLU_C:get_all_projections(Host, TcpPort, T), {error, not_written} = ?FLU_C:read_projection(Host, TcpPort, T, 2) end || T <- [public, private] ] after From 0e38eddaa99305af04846df865c6330344934e71 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Mon, 6 Apr 2015 20:07:39 +0900 Subject: [PATCH 05/22] WIP: baby step, machi_chain_manager1_test:smoke0_test() works --- src/machi_chain_manager1.erl | 11 +++++++---- test/machi_chain_manager1_test.erl | 14 ++++++++++---- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index 5f662f6..c554621 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -35,6 +35,9 @@ -define(D(X), io:format(user, "~s ~p\n", [??X, X])). -define(Dw(X), io:format(user, "~s ~w\n", [??X, X])). +-define(FLU_C, machi_flu1_client). +-define(FLU_PC, machi_proxy_flu1_client). + %% Keep a history of our flowchart execution in the process dictionary. -define(REACT(T), put(react, [T|get(react)])). 
@@ -202,13 +205,13 @@ code_change(_OldVsn, S, _Extra) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% finish_init(BestProj, #ch_mgr{init_finished=false, myflu=MyFLU} = S) -> - case machi_flu0:proj_read_latest(MyFLU, private) of - error_unwritten -> + case ?FLU_PC:read_latest_projection(MyFLU, private) of + {error, not_written} -> Epoch = BestProj#projection_v1.epoch_number, - case machi_flu0:proj_write(MyFLU, Epoch, private, BestProj) of + case ?FLU_PC:write_projection(MyFLU, private, BestProj) of ok -> S#ch_mgr{init_finished=true, proj=BestProj}; - error_written -> + {error, not_written} -> exit({yo_impossible, ?LINE}); Else -> ?D({retry,Else}), diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 5f4367a..22a07db 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -28,7 +28,8 @@ -define(D(X), io:format(user, "~s ~p\n", [??X, X])). -define(Dw(X), io:format(user, "~s ~w\n", [??X, X])). --define(FLU_C, machi_flu1_client). +-define(FLU_C, machi_flu1_client). +-define(FLU_PC, machi_proxy_flu1_client). -export([]). @@ -133,18 +134,23 @@ chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> -ifndef(PULSE). -smoke0_testXXX() -> +smoke0_test() -> {ok, _} = machi_partition_simulator:start_link({1,2,3}, 50, 50), Host = "localhost", TcpPort = 6623, {ok, FLUa} = machi_flu1:start_link([{a,TcpPort,"./data.a"}]), - {ok, M0} = ?MGR:start_link(a, [a,b,c], a), + Pa = #p_srvr{name=a, proto=ipv4, address=Host, port=TcpPort}, + %% Egadz, more racing on startup, yay. TODO fix. + timer:sleep(1), + {ok, FLUaP} = ?FLU_PC:start_link(Pa), + {ok, M0} = ?MGR:start_link(a, [a,b,c], FLUaP), _SockA = machi_util:connect(Host, TcpPort), try pong = ?MGR:ping(M0) after ok = ?MGR:stop(M0), - ok = machi_flu0:stop(FLUa), + ok = machi_flu1:stop(FLUa), + ok = ?FLU_PC:quit(FLUaP), ok = machi_partition_simulator:stop() end. 
From ad872e23ca44610713c6abfbb5ce980bee728007 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Wed, 8 Apr 2015 14:24:07 +0900 Subject: [PATCH 06/22] Add first basic round of EDoc documentation, 'make edoc' target --- Makefile | 8 +- doc/overview.edoc | 170 ++++++++++++++++++ edoc/.gitignore | 1 + edoc/edoc-info | 7 + edoc/erlang.png | Bin 0 -> 2109 bytes edoc/index.html | 17 ++ edoc/machi_admin_util.html | 60 +++++++ edoc/machi_app.html | 39 +++++ edoc/machi_chain_manager1.html | 155 +++++++++++++++++ edoc/machi_chash.html | 171 ++++++++++++++++++ edoc/machi_flu1.html | 63 +++++++ edoc/machi_flu1_client.html | 278 ++++++++++++++++++++++++++++++ edoc/machi_flu_sup.html | 41 +++++ edoc/machi_projection.html | 70 ++++++++ edoc/machi_projection_store.html | 163 ++++++++++++++++++ edoc/machi_proxy_flu1_client.html | 222 ++++++++++++++++++++++++ edoc/machi_sequencer.html | 23 +++ edoc/machi_sup.html | 39 +++++ edoc/machi_util.html | 150 ++++++++++++++++ edoc/modules-frame.html | 24 +++ edoc/overview-summary.html | 185 ++++++++++++++++++++ edoc/overview.edoc | 14 ++ edoc/packages-frame.html | 11 ++ edoc/stylesheet.css | 55 ++++++ rebar.config | 1 + src/machi_admin_util.erl | 4 +- src/machi_app.erl | 2 + src/machi_chain_manager1.erl | 22 +++ src/machi_chash.erl | 10 +- src/machi_flu1.erl | 31 +++- src/machi_flu1_client.erl | 14 +- src/machi_flu_sup.erl | 3 + src/machi_projection.erl | 17 ++ src/machi_projection_store.erl | 52 ++++++ src/machi_proxy_flu1_client.erl | 66 +++++++ src/machi_sequencer.erl | 3 + src/machi_sup.erl | 2 + src/machi_util.erl | 65 +++++-- 38 files changed, 2232 insertions(+), 26 deletions(-) create mode 100644 doc/overview.edoc create mode 100644 edoc/.gitignore create mode 100644 edoc/edoc-info create mode 100644 edoc/erlang.png create mode 100644 edoc/index.html create mode 100644 edoc/machi_admin_util.html create mode 100644 edoc/machi_app.html create mode 100644 edoc/machi_chain_manager1.html create mode 100644 edoc/machi_chash.html create mode 100644 edoc/machi_flu1.html create mode 100644 edoc/machi_flu1_client.html create mode 100644 edoc/machi_flu_sup.html create mode 100644 edoc/machi_projection.html create mode 100644 edoc/machi_projection_store.html create mode 100644 edoc/machi_proxy_flu1_client.html create mode 100644 edoc/machi_sequencer.html create mode 100644 edoc/machi_sup.html create mode 100644 edoc/machi_util.html create mode 100644 edoc/modules-frame.html create mode 100644 edoc/overview-summary.html create mode 100644 edoc/overview.edoc create mode 100644 edoc/packages-frame.html create mode 100644 edoc/stylesheet.css diff --git a/Makefile b/Makefile index ba8df11..b91d653 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ ifeq ($(REBAR_BIN),) REBAR_BIN = ./rebar endif -.PHONY: rel deps package pkgclean +.PHONY: rel deps package pkgclean edoc all: deps compile @@ -21,6 +21,12 @@ test: deps compile eunit eunit: $(REBAR_BIN) -v skip_deps=true eunit +edoc: edoc-clean + $(REBAR_BIN) skip_deps=true doc + +edoc-clean: + rm -f edoc/*.png edoc/*.html edoc/*.css edoc/edoc-info + pulse: compile env USE_PULSE=1 $(REBAR_BIN) skip_deps=true clean compile env USE_PULSE=1 $(REBAR_BIN) skip_deps=true -D PULSE eunit diff --git a/doc/overview.edoc b/doc/overview.edoc new file mode 100644 index 0000000..6182f6b --- /dev/null +++ b/doc/overview.edoc @@ -0,0 +1,170 @@ + +@title Machi: a small village of replicated files + +@doc + +== About This EDoc Documentation == + +This EDoc-style documentation will concern itself only with Erlang +function APIs and function 
& data types. Higher-level design and +commentary will remain outside of the Erlang EDoc system; please see +the "Pointers to Other Machi Documentation" section below for more +details. + +Readers should beware that this documentation may be out-of-sync with +the source code. When in doubt, use the `make edoc' command to +regenerate all HTML pages. + +It is the developer's responsibility to re-generate the documentation +periodically and commit it to the Git repo. + +== Machi Code Overview == + +=== Chain Manager === + +The Chain Manager is responsible for managing the state of Machi's +"Chain Replication" state. This role is roughly analogous to the +"Riak Core" application inside of Riak, which takes care of +coordinating replica placement and replica repair. + +For each primitive data server in the cluster, a Machi FLU, there is a +Chain Manager process that manages its FLU's role within the Machi +cluster's Chain Replication scheme. Each Chain Manager process +executes locally and independently to manage the distributed state of +a single Machi Chain Replication chain. + +
  • To contrast with Riak Core ... Riak Core's claimant process is
    solely responsible for managing certain critical aspects of
    Riak Core distributed state.  Machi's Chain Manager process
    performs similar tasks as Riak Core's claimant.  However, Machi
    has several active Chain Manager processes, one per FLU server,
    instead of a single active process like Core's claimant.  Each
    Chain Manager process acts independently; each is constrained
    so that it will reach consensus via independent computation
    & action.

    Full discussion of this distributed consensus is outside the
    scope of this document; see the "Pointers to Other Machi
    Documentation" section below for more information.

  • Machi differs from a Riak Core application because Machi's
    replica placement policy is simply, "All Machi servers store
    replicas of all Machi files".  Machi is intended to be a
    primitive building block for creating larger cluster-of-clusters
    where files are distributed/fragmented/sharded across a large
    pool of independent Machi clusters.

  • See
    [https://www.usenix.org/legacy/events/osdi04/tech/renesse.html]
    for a copy of the paper, "Chain Replication for Supporting High
    Throughput and Availability" by Robbert van Renesse and Fred
    B. Schneider.
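To make the "one Chain Manager per FLU" arrangement concrete, here is
a minimal start-up sketch adapted from the smoke0 test elsewhere in
this patch series; the server name, TCP port, data directory, and
chain membership are illustrative only, and (as in that test) only
FLU `a' is actually running:

    %% Start one FLU, a client proxy for it, and that FLU's own chain
    %% manager.  FLUs b and c are merely named as chain members here.
    {ok, FLUa} = machi_flu1:start_link([{a, 6623, "./data.a"}]),
    Pa = #p_srvr{name=a, proto=ipv4, address="localhost", port=6623},
    {ok, FLUaP} = machi_proxy_flu1_client:start_link(Pa),
    {ok, Mgr} = machi_chain_manager1:start_link(a, [a,b,c], FLUaP),
    pong = machi_chain_manager1:ping(Mgr).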
=== FLU ===

The FLU is the basic storage server for Machi.
  • The name FLU is taken from "flash storage unit" from the paper
    "CORFU: A Shared Log Design for Flash Clusters" by
    Balakrishnan, Malkhi, Prabhakaran, and Wobber.  See
    [https://www.usenix.org/conference/nsdi12/technical-sessions/presentation/balakrishnan]

  • In CORFU, the sequencer step is a prerequisite step that is
    performed by a separate component, the Sequencer.  In Machi,
    the `append_chunk()' protocol message has an implicit
    "sequencer" operation applied by the "head" of the Machi Chain
    Replication chain.  If a client wishes to write data that has
    already been assigned a sequencer position, then the
    `write_chunk()' API function is used; a sketch contrasting the
    two calls follows this list.
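A minimal sketch of that contrast, using the machi_flu1_client calls
documented in the generated module pages later in this patch; every
capitalized variable (`Host', `TcpPort', `Host2', `TcpPort2',
`EpochID', `Chunk') is a placeholder, not a prescription:

    %% append_chunk/5: the head of the chain assigns the file name and
    %% offset via its implicit sequencer; the caller learns the
    %% position from the reply.
    {ok, {Offset, _Size, File}} =
        machi_flu1_client:append_chunk(Host, TcpPort, EpochID,
                                       <<"prefix">>, Chunk),
    %% write_chunk/6: used only when a position has already been
    %% assigned (for example, copying an already-sequenced chunk to
    %% another replica); the caller names the file and offset.
    ok = machi_flu1_client:write_chunk(Host2, TcpPort2, EpochID,
                                       File, Offset, Chunk).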
For each FLU, there are three independent tasks that are implemented
using three different Erlang processes:
  • A FLU server, implemented primarily by `machi_flu1.erl'.

  • A projection store server, implemented primarily by
    `machi_projection_store.erl'.

  • A chain state manager server, implemented primarily by
    `machi_chain_manager1.erl'.
From the perspective of failure detection, it is very convenient that
all three FLU-related services (file server, sequencer server, and
projection server) are accessed using the same single TCP port.

=== Projection (data structure) ===

The projection is a data structure that specifies the current state
of the Machi cluster: all FLUs, which FLUs are considered
up/running or down/crashed/stopped, which FLUs are active
participants in the Chain Replication protocol, and which FLUs are
under "repair" (i.e., having their data resynchronized when
newly-added to a cluster or when restarting after a crash).

=== Projection Store (server) ===

The projection store is a storage service that is implemented by an
Erlang/OTP `gen_server' process that is associated with each
FLU.  Conceptually, the projection store is an array of
write-once registers.  For each projection store register, the
key is a 2-tuple of an epoch number (`non_neg_integer()' type)
and a projection type (`public' or `private' type); the value is
a projection data structure (`projection_v1()' type).

=== Client and Proxy Client ===

Machi is intentionally avoiding using distributed Erlang for Machi's
communication.  This design decision makes Erlang-side code more
difficult & complex but allows us the freedom of implementing
parts of Machi in other languages without major
protocol & API & glue code changes later in the product's
lifetime.

There are two layers of interface for Machi clients.
  • The `machi_flu1_client' module implements an API that uses a
    TCP socket directly.

  • The `machi_proxy_flu1_client' module implements an API that
    uses a local, long-lived `gen_server' process as a proxy for
    the remote, perhaps disconnected-or-crashed Machi FLU server;
    see the usage sketch after this list.
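A usage sketch for the proxy flavor, adapted from the api_smoke test
added elsewhere in this patch series; the #p_srvr{} field values and
the `TcpPort' variable are illustrative, and the all-zeros "fake"
epoch ID is the same placeholder that test uses:

    %% Start a long-lived proxy for one FLU, append a chunk through
    %% it, read the chunk back, then shut the proxy down.
    I = #p_srvr{name=smoke, proto=ipv4, address="localhost",
                port=TcpPort},
    {ok, Prox} = machi_proxy_flu1_client:start_link(I),
    FakeEpoch = {-1, <<0:(20*8)/big>>},
    {ok, {Off, Size, File}} =
        machi_proxy_flu1_client:append_chunk(Prox, FakeEpoch,
                                             <<"prefix">>, <<"data">>),
    {ok, <<"data">>} =
        machi_proxy_flu1_client:read_chunk(Prox, FakeEpoch,
                                           File, Off, Size),
    ok = machi_proxy_flu1_client:quit(Prox).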
The types for both modules ought to be the same.  However, due to
rapid code churn, some differences might exist.  Any major difference
is (almost by definition) a bug: please open a GitHub issue to request
a correction.

== TODO notes ==

Any use of the string "TODO" in upper/lower/mixed case, anywhere in
the code, is a reminder signal of unfinished work.

== Pointers to Other Machi Documentation ==
  • If you are viewing this document locally, please look in the
    `../doc/' directory,

  • If you are viewing this document via the Web, please find the
    documentation via this link:
    [http://github.com/basho/machi/tree/master/doc/]
    Please be aware that this link points to the `master' branch
    of the Machi source repository and therefore may be
    out-of-sync with non-`master' branch code.
diff --git a/edoc/.gitignore b/edoc/.gitignore new file mode 100644 index 0000000..bcd672a --- /dev/null +++ b/edoc/.gitignore @@ -0,0 +1 @@ +tmp.* diff --git a/edoc/edoc-info b/edoc/edoc-info new file mode 100644 index 0000000..f119ed0 --- /dev/null +++ b/edoc/edoc-info @@ -0,0 +1,7 @@ +%% encoding: UTF-8 +{application,machi}. +{packages,[]}. +{modules,[machi_admin_util,machi_app,machi_chain_manager1,machi_chash, + machi_flu1,machi_flu1_client,machi_flu_sup,machi_projection, + machi_projection_store,machi_proxy_flu1_client,machi_sequencer, + machi_sup,machi_util]}. diff --git a/edoc/erlang.png b/edoc/erlang.png new file mode 100644 index 0000000000000000000000000000000000000000..987a618e2403af895bfaf8c2f929e3a4f3746659 GIT binary patch literal 2109 zcmV-D2*US?P)rez_nr%N ze)-p~%6|a|LA_bA=l=$|3jjqS$tjbGG?@TN0w$Azq7Z{YeQxKcpLO55vno1^u23DP&V=i9-KAAsU*ECy^#OtaDC!lVSo!+|-%T+LhTHP^Oqwx8m)b4r3V28JmV&6M#iG)&0;P`j>XGfomEIEK6wPkhI{{K?3#uAGq$!`N_F)TNX zAvuspF?^;c9h%CPWyTDc_03%r4N8+Yzzo_VSfa!zo_7F6D?<+-+KkHwXiWQR=Mr(9|K@{{xEjfDvAbS9uNCP&{)NNCoC?XA$aRe>R8-> z5N<#S_)$d|EYpJfPC?{`$Y~f4yjH&dxHXIGG8wiaLBD6usC87cg+dd&3WLJd4_TcmEeAOz8R>ikgW(9821 z{34Se09Y?KoG<_Y;DDSoyTk>fUN0YO5)3^Za{&s1JbidC9}56{px+f|K_0;YuL5h} z_9J3y%7ucwM)E4K#=Cn7tCjjRkKjnQuiFcM6{17Jt#5F}7z8~RYqW24xV?kAU6xQN zh+h4|SmO1;TdsVOaOeD*kKf}6I7=6ZNig_rtqV?Ov1HrU(P%Hi#6npSe>%qGaNK1w zW$v+r`r0>#p~AN^8b)#7Yesu(ys(>3SCYb4sF9%A9=kMHrLmzk}E&WPG~Jx z9!r{qo5M184t;<7I`t1AsNjv912EeKkHKtOSl%wbcjFh7L6|G?Q+{?radOvuEW$>1 zoc+c&F+u$^0f}1_2dN&lS#I#p3e&+|YGHlMzRC)%&8TnGt+p*;Oz z`0=D=n|qcN+f@07;QjB@ktLhZ`+qz;(xYDli^Pex&&wwU2V4N-a3b@veqHg2cvCRb zoi=ZerLk!4t5!s3?|ARuWx_4-VCgl|TY2qa@$Dr~5QdiT8?$oPpZhaF5UOZ&x=+I9 zt((`6wBPM((BS{;2lmSB;o%z{>=mg*1k2oLjI=+zcf5$4BIZmkOrjrE z*VY(<@FO?zBVDc+Q~Lh;LnlYodZ$J3tmWJBN4j~wVOWelzexhft2nY6A3PZAcm!q} z931CL#1Ki6;HM{agTbKF>3(R-yuF1&Apn3Nh@PGvv)K$mkVqu*^z@vaFgQ3kFfg!s z^=f26@{Ny=_w@7x1qHF$bEk5X$)wR}0s{l>V!TCGM=R5Ei1Ll8u7Z*N0G1CPgB zyLPP|0H{-FRUDJv`Ea=9fX zC63D4+FBlumz$eAJv~j5q*|@^_xC?_>XiL0K@bH61$;i=&CLx(QGb8`8#iu{BnjJW zHUvSgUcK7T&~W(h;koN8t5vB~Ha0dgnane1&RA#87dVcaOpEMM)6>)E&YiPZEXBpe zlarHk89g;+G#U#E3hL_W002xT6UTApOeR%UR_5g7q^73!_4PG2Hi|@|ii(Pfi3vIY z0ES^?Mx1IOizO0?e0_a!9483k`PtCk-rm~Unwpw=?b@~O?(WdgP^bMMAYlLg{dIM9 zOy}OcxVTs%k(@q#n$PF+`TXkYYA%;cr_*5ofWcr$PEL-Ai772Db)3`L*|~G)&eqn} zq@*OrbXim`UAiO`3XdK=%H#1=D%HHV>FMbqAtCAM=@!e}C6Cc))ai5zg~H3rYjkup zD=RBMKR+`wv!kN}1^{3fR#a3}RaLcP#}20|H!^bT)~%G3lp{xu!0_{Wr2hW?>({UQ z`T1F`)|D$)*3{IP&1UDKhLn_)sMYHH{QRkzV=$M?#W2idGFh!wf*`b7ZGC-xVPT=c zV1Vs&!otFoN~M>VQ$G_G6}5No-m0pqwzjr;?W@INu~;m#k*%qz(P%VUt#;3zJ^lUt zU0q%G?%kVzvF7cqQmLw|tA~e&XIqun*x2Ug=9-!s48ty7ycil9Di(|7aybkD7#y?%lgQ z9`Ewy%eDpgxlvJ3Cr+GTFc>(F+cg;(8TPc>y?b|jeEgLwR}LLIBoqp1+1c4_HrvO? z$J5g@G&D3gIC$2ITrQ7`iwh4AfA;K|OePZu1oriTVVG1Zl}e@S)~)mK@UU1cI-Ty| z!Gj8gg2UmUD2ibif*{e+(R4bU#bU|j@{Joe^7(uSf+8X!q*7@_M1;L=AqbM3oXp{H nT3T9A6wS=c+_!HZolgHhw9g$%O4Wbp00000NkvXXu0mjf3HKBY literal 0 HcmV?d00001 diff --git a/edoc/index.html b/edoc/index.html new file mode 100644 index 0000000..be9d1af --- /dev/null +++ b/edoc/index.html @@ -0,0 +1,17 @@ + + + +The machi application + + + + + + +<h2>This page uses frames</h2> +<p>Your browser does not accept frames. +<br>You should go to the <a href="overview-summary.html">non-frame version</a> instead. +</p> + + + \ No newline at end of file diff --git a/edoc/machi_admin_util.html b/edoc/machi_admin_util.html new file mode 100644 index 0000000..744230a --- /dev/null +++ b/edoc/machi_admin_util.html @@ -0,0 +1,60 @@ + + + + +Module machi_admin_util + + + + +
[Generated EDoc page, condensed: Module machi_admin_util — Machi chain
replication administration utilities.  Data types: inet_host() =
inet:ip_address() | inet:hostname(); inet_port() = inet:port_number().
Functions: verify_file_checksums_local/3,4 and
verify_file_checksums_remote/3,4, each returning
{ok, [tuple()]} | {error, term()}.]
+ + diff --git a/edoc/machi_app.html b/edoc/machi_app.html new file mode 100644 index 0000000..8bba636 --- /dev/null +++ b/edoc/machi_app.html @@ -0,0 +1,39 @@ + + + + +Module machi_app + + + + +
[Generated EDoc page, condensed: Module machi_app — Top-level
supervisor for the Machi application.  Behaviour: application.
Functions: start/2, stop/1.]
+ + diff --git a/edoc/machi_chain_manager1.html b/edoc/machi_chain_manager1.html new file mode 100644 index 0000000..e2d0bec --- /dev/null +++ b/edoc/machi_chain_manager1.html @@ -0,0 +1,155 @@ + + + + +Module machi_chain_manager1 + + + + +
[Generated EDoc page, condensed: Module machi_chain_manager1 — The
Machi chain manager, Guardian of all things related to Chain
Replication state, status, and data replica safety.  Behaviour:
gen_server.  The page description repeats the Chain Manager material
from the overview above.  Functions: start_link/3,4, stop/1, ping/1,
make_projection_summary/1, projection_transitions_are_sane/2,
get_all_hosed/1, the gen_server callbacks, and the test helpers
test_calc_projection/2, test_calc_proposed_projection/1,
test_write_proposed_projection/1, test_read_latest_public_projection/2,
test_react_to_env/1.]
+ + diff --git a/edoc/machi_chash.html b/edoc/machi_chash.html new file mode 100644 index 0000000..7f2f293 --- /dev/null +++ b/edoc/machi_chash.html @@ -0,0 +1,171 @@ + + + + +Module machi_chash + + + + +
[Generated EDoc page, condensed: Module machi_chash — Consistent
hashing library, also known as "random slicing"; originally from the
Hibari DB source code at https://github.com/hibari.  Data types:
float_map() (a subdivision of the unit interval whose weights sum to
1.0), float_tree() (opaque rapid-lookup structure), nextfloat_list(),
owner_int_range(), owner_name(), owner_weight(), owner_weight_list(),
weight().  Functions: make_float_map/1,2, make_tree/1, query_tree/2,
hash_binary_via_float_map/2, hash_binary_via_float_tree/2,
make_demo_map1/0, make_demo_map2/0, pretty_with_integers/2,3,
sum_map_weights/1, zzz_usage_details/0.]
+ + diff --git a/edoc/machi_flu1.html b/edoc/machi_flu1.html new file mode 100644 index 0000000..3ce6902 --- /dev/null +++ b/edoc/machi_flu1.html @@ -0,0 +1,63 @@ + + + + +Module machi_flu1 + + + + +
[Generated EDoc page, condensed: Module machi_flu1 — The Machi FLU
file server + file location sequencer.  The page description notes the
rudimentary TCP protocol shared by the file server, sequencer, and
projection store, the CORFU origin of the name "FLU", and the missing
write-once enforcement (TODO).  Functions: start_link/1, stop/1.]
+ + diff --git a/edoc/machi_flu1_client.html b/edoc/machi_flu1_client.html new file mode 100644 index 0000000..f1f6203 --- /dev/null +++ b/edoc/machi_flu1_client.html @@ -0,0 +1,278 @@ + + + + +Module machi_flu1_client + + + + +
[Generated EDoc page, condensed: Module machi_flu1_client — Erlang API
for the Machi FLU TCP protocol version 1.  Data types: chunk(),
chunk_csum(), chunk_pos(), chunk_s(), chunk_size(), epoch_csum(),
epoch_id(), epoch_num(), file_info(), file_name(), file_name_s(),
file_offset(), file_prefix(), file_size(), inet_host(), inet_port(),
projection(), projection_type().  File API: append_chunk/4,5,
read_chunk/5,6, checksum_list/3,4, list_files/2,3.  Projection API:
get_latest_epoch/2,3, read_latest_projection/2,3, read_projection/3,4,
write_projection/3,4, get_all_projections/2,3,
list_all_projections/2,3.  Restricted API: write_chunk/5,6,
delete_migration/3,4, trunc_hack/3,4.  Common API: quit/1.]
+ + diff --git a/edoc/machi_flu_sup.html b/edoc/machi_flu_sup.html new file mode 100644 index 0000000..46a3345 --- /dev/null +++ b/edoc/machi_flu_sup.html @@ -0,0 +1,41 @@ + + + + +Module machi_flu_sup + + + + +
[Generated EDoc page, condensed: Module machi_flu_sup — Supervisor for
Machi FLU servers and their related support servers.  Behaviour:
supervisor.  Functions: start_link/0, init/1.]
+ + diff --git a/edoc/machi_projection.html b/edoc/machi_projection.html new file mode 100644 index 0000000..59f9229 --- /dev/null +++ b/edoc/machi_projection.html @@ -0,0 +1,70 @@ + + + + +Module machi_projection + + + + +
[Generated EDoc page, condensed: Module machi_projection — API for
manipulating Machi projection data structures (i.e., records).
Functions: new/6,7,8, compare/2, update_projection_checksum/1,
update_projection_dbg2/2, make_projection_summary/1.]
+ + diff --git a/edoc/machi_projection_store.html b/edoc/machi_projection_store.html new file mode 100644 index 0000000..ed25ba3 --- /dev/null +++ b/edoc/machi_projection_store.html @@ -0,0 +1,163 @@ + + + + +Module machi_projection_store + + + + +
+ +

Module machi_projection_store

+The Machi write-once projection store service. + + +

Description

The Machi write-once projection store service.

+ +

This API is gen_server-style message passing, intended for use +within a single Erlang node to glue together the projection store +server with the node-local process that implements Machi's TCP +client access protocol (on the "server side" of the TCP connection).

+ +

All Machi client access to the projection store SHOULD NOT use this +module's API.

+ + The projection store is implemented by an Erlang/OTP gen_server + process that is associated with each FLU. Conceptually, the + projection store is an array of write-once registers. For each + projection store register, the key is a 2-tuple of an epoch number + (non_neg_integer() type) and a projection type (public or + private type); the value is a projection data structure + (projection_v1() type). +

Function Index

+ + + + + + + + + + + + + + + + + + + +
code_change/3
get_all_projections/2Fetch all projection records of type ProjType.
get_all_projections/3Fetch all projection records of type ProjType.
get_latest_epoch/2Fetch the latest epoch number + checksum for type ProjType.
get_latest_epoch/3Fetch the latest epoch number + checksum for type ProjType.
handle_call/3
handle_cast/2
handle_info/2
init/1
list_all_projections/2Fetch all projection epoch numbers of type ProjType.
list_all_projections/3Fetch all projection epoch numbers of type ProjType.
read/3Fetch the projection record type ProjType for epoch number Epoch .
read/4Fetch the projection record type ProjType for epoch number Epoch .
read_latest_projection/2Fetch the latest projection record for type ProjType.
read_latest_projection/3Fetch the latest projection record for type ProjType.
start_link/3Start a new projection store server.
terminate/2
write/3Write the projection record type ProjType for epoch number Epoch .
write/4Write the projection record type ProjType for epoch number Epoch .
+ +

Function Details

+ +

code_change/3

+
+

code_change(OldVsn, S, Extra) -> any()

+
+ +

get_all_projections/2

+
+

get_all_projections(PidSpec, ProjType) -> any()

+

Fetch all projection records of type ProjType.

+ +

get_all_projections/3

+
+

get_all_projections(PidSpec, ProjType, Timeout) -> any()

+

Fetch all projection records of type ProjType.

+ +

get_latest_epoch/2

+
+

get_latest_epoch(PidSpec, ProjType) -> any()

+

Fetch the latest epoch number + checksum for type ProjType.

+ +

get_latest_epoch/3

+
+

get_latest_epoch(PidSpec, ProjType, Timeout) -> any()

+

Fetch the latest epoch number + checksum for type ProjType. + projection.

+ +

handle_call/3

+
+

handle_call(Request, From, S) -> any()

+
+ +

handle_cast/2

+
+

handle_cast(Msg, S) -> any()

+
+ +

handle_info/2

+
+

handle_info(Info, S) -> any()

+
+ +

init/1

+
+

init(X1) -> any()

+
+ +

list_all_projections/2

+
+

list_all_projections(PidSpec, ProjType) -> any()

+

Fetch all projection epoch numbers of type ProjType.

+ +

list_all_projections/3

+
+

list_all_projections(PidSpec, ProjType, Timeout) -> any()

+

Fetch all projection epoch numbers of type ProjType.

+ +

read/3

+
+

read(PidSpec, ProjType, Epoch) -> any()

+

Fetch the projection record of type ProjType for epoch number Epoch.

+ +

read/4

+
+

read(PidSpec, ProjType, Epoch, Timeout) -> any()

+

Fetch the projection record of type ProjType for epoch number Epoch.

+ +

read_latest_projection/2

+
+

read_latest_projection(PidSpec, ProjType) -> any()

+

Fetch the latest projection record for type ProjType.

+ +

read_latest_projection/3

+
+

read_latest_projection(PidSpec, ProjType, Timeout) -> any()

+

Fetch the latest projection record for type ProjType.

+ +

start_link/3

+
+

start_link(RegName, DataDir, NotifyWedgeStateChanges) -> any()

+

Start a new projection store server.

+ + The DataDir argument should be the same directory as specified + for use by our companion FLU data server -- all file system paths + used by this server are intended to be stored underneath a common + file system parent directory as the FLU data server & sequencer + servers.
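A minimal start-up sketch (all names are illustrative, and the exact form of the NotifyWedgeStateChanges argument is not documented here, so the value below is an assumption):

    %% The projection store shares its FLU's data directory.
    {ok, _Pid} = machi_projection_store:start_link(flu_a_pstore,
                                                   "./data/flu_a",
                                                   undefined).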

+ +

terminate/2

+
+

terminate(Reason, S) -> any()

+
+ +

write/3

+
+

write(PidSpec, ProjType, Proj) -> any()

+

Write the projection record of type ProjType for epoch number Epoch.

+ +

write/4

+
+

write(PidSpec, ProjType, Proj, Timeout) -> any()

+

Write the projection record of type ProjType for epoch number Epoch.

+
+ + +

Generated by EDoc, Apr 8 2015, 17:31:11.

+ + diff --git a/edoc/machi_proxy_flu1_client.html b/edoc/machi_proxy_flu1_client.html new file mode 100644 index 0000000..4cf46f4 --- /dev/null +++ b/edoc/machi_proxy_flu1_client.html @@ -0,0 +1,222 @@ + + + + +Module machi_proxy_flu1_client + + + + +
+ +

Module machi_proxy_flu1_client

+Erlang API for the Machi FLU TCP protocol version 1, with a +proxy-process style API for hiding messy details such as TCP +connection/disconnection with the remote Machi server. + +

Behaviours: gen_server.

+ +

Description

Erlang API for the Machi FLU TCP protocol version 1, with a +proxy-process style API for hiding messy details such as TCP +connection/disconnection with the remote Machi server.

+ +

Machi is intentionally avoiding using distributed Erlang for + Machi's communication. This design decision makes Erlang-side code + more difficult & complex, but it's the price to pay for some +language independence. Later in Machi's life cycle, we need to +(re-)implement some components in a non-Erlang/BEAM-based language.

+ + This module implements a "man in the middle" proxy between the + Erlang client and Machi server (which is on the "far side" of a TCP + connection to somewhere). This proxy process will always execute + on the same Erlang node as the Erlang client that uses it. The + proxy is intended to be a stable, long-lived process that survives + TCP communication problems with the remote server. +
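A rough usage sketch; the #p_srvr{} field names and most reply shapes below are assumptions, while the function names and arities are this module's own:

    %% Start the long-lived proxy for one remote FLU.
    {ok, Proxy} = machi_proxy_flu1_client:start_link(
                      #p_srvr{name=a, address="localhost", port=32900}),
    %% Ask the remote projection store for the current epoch + checksum.
    {ok, EpochID} = machi_proxy_flu1_client:get_latest_epoch(Proxy, public),
    %% Append a chunk under a file prefix; the reply carries the assigned
    %% file name and offset (shape assumed).
    _Reply = machi_proxy_flu1_client:append_chunk(Proxy, EpochID,
                                                  <<"pre">>, <<"Hello!">>),
    %% Close the TCP connection and stop the proxy.
    _ = machi_proxy_flu1_client:quit(Proxy).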

Function Index

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
append_chunk/4Append a chunk (binary- or iolist-style) of data to a file + with Prefix.
append_chunk/5Append a chunk (binary- or iolist-style) of data to a file + with Prefix.
checksum_list/3Fetch the list of chunk checksums for File.
checksum_list/4Fetch the list of chunk checksums for File.
code_change/3
get_all_projections/2Get all projections from the FLU's projection store.
get_all_projections/3Get all projections from the FLU's projection store.
get_latest_epoch/2Get the latest epoch number + checksum from the FLU's projection store.
get_latest_epoch/3Get the latest epoch number + checksum from the FLU's projection store.
handle_call/3
handle_cast/2
handle_info/2
init/1
list_all_projections/2Get all epoch numbers from the FLU's projection store.
list_all_projections/3Get all epoch numbers from the FLU's projection store.
list_files/2Fetch the list of all files on the remote FLU.
list_files/3Fetch the list of all files on the remote FLU.
quit/1Quit & close the connection to remote FLU and stop our + proxy process.
read_chunk/5Read a chunk of data of size Size from File at Offset.
read_chunk/6Read a chunk of data of size Size from File at Offset.
read_latest_projection/2Get the latest projection from the FLU's projection store for ProjType
read_latest_projection/3Get the latest projection from the FLU's projection store for ProjType
read_projection/3Read a projection Proj of type ProjType.
read_projection/4Read a projection Proj of type ProjType.
start_link/1Start a local, long-lived process that will be our steady + & reliable communication proxy with the fickle & flaky + remote Machi server.
terminate/2
write_projection/3Write a projection Proj of type ProjType.
write_projection/4Write a projection Proj of type ProjType.
+ +

Function Details

+ +

append_chunk/4

+
+

append_chunk(PidSpec, EpochID, Prefix, Chunk) -> any()

+

Append a chunk (binary- or iolist-style) of data to a file + with Prefix.

+ +

append_chunk/5

+
+

append_chunk(PidSpec, EpochID, Prefix, Chunk, Timeout) -> any()

+

Append a chunk (binary- or iolist-style) of data to a file + with Prefix.

+ +

checksum_list/3

+
+

checksum_list(PidSpec, EpochID, File) -> any()

+

Fetch the list of chunk checksums for File.

+ +

checksum_list/4

+
+

checksum_list(PidSpec, EpochID, File, Timeout) -> any()

+

Fetch the list of chunk checksums for File.

+ +

code_change/3

+
+

code_change(OldVsn, S, Extra) -> any()

+
+ +

get_all_projections/2

+
+

get_all_projections(PidSpec, ProjType) -> any()

+

Get all projections from the FLU's projection store.

+ +

get_all_projections/3

+
+

get_all_projections(PidSpec, ProjType, Timeout) -> any()

+

Get all projections from the FLU's projection store.

+ +

get_latest_epoch/2

+
+

get_latest_epoch(PidSpec, ProjType) -> any()

+

Get the latest epoch number + checksum from the FLU's projection store.

+ +

get_latest_epoch/3

+
+

get_latest_epoch(PidSpec, ProjType, Timeout) -> any()

+

Get the latest epoch number + checksum from the FLU's projection store.

+ +

handle_call/3

+
+

handle_call(Request, From, S) -> any()

+
+ +

handle_cast/2

+
+

handle_cast(Msg, S) -> any()

+
+ +

handle_info/2

+
+

handle_info(Info, S) -> any()

+
+ +

init/1

+
+

init(X1) -> any()

+
+ +

list_all_projections/2

+
+

list_all_projections(PidSpec, ProjType) -> any()

+

Get all epoch numbers from the FLU's projection store.

+ +

list_all_projections/3

+
+

list_all_projections(PidSpec, ProjType, Timeout) -> any()

+

Get all epoch numbers from the FLU's projection store.

+ +

list_files/2

+
+

list_files(PidSpec, EpochID) -> any()

+

Fetch the list of all files on the remote FLU.

+ +

list_files/3

+
+

list_files(PidSpec, EpochID, Timeout) -> any()

+

Fetch the list of all files on the remote FLU.

+ +

quit/1

+
+

quit(PidSpec) -> any()

+

Quit & close the connection to remote FLU and stop our + proxy process.

+ +

read_chunk/5

+
+

read_chunk(PidSpec, EpochID, File, Offset, Size) -> any()

+

Read a chunk of data of size Size from File at Offset.

+ +

read_chunk/6

+
+

read_chunk(PidSpec, EpochID, File, Offset, Size, Timeout) -> any()

+

Read a chunk of data of size Size from File at Offset.

+ +

read_latest_projection/2

+
+

read_latest_projection(PidSpec, ProjType) -> any()

+

Get the latest projection from the FLU's projection store for ProjType.

+ +

read_latest_projection/3

+
+

read_latest_projection(PidSpec, ProjType, Timeout) -> any()

+

Get the latest projection from the FLU's projection store for ProjType.

+ +

read_projection/3

+
+

read_projection(PidSpec, ProjType, Epoch) -> any()

+

Read a projection Proj of type ProjType.

+ +

read_projection/4

+
+

read_projection(PidSpec, ProjType, Epoch, Timeout) -> any()

+

Read a projection Proj of type ProjType.

+ +

start_link/1

+
+

start_link(P_srvr) -> any()

+

Start a local, long-lived process that will be our steady + & reliable communication proxy with the fickle & flaky + remote Machi server.

+ +

terminate/2

+
+

terminate(Reason, S) -> any()

+
+ +

write_projection/3

+
+

write_projection(PidSpec, ProjType, Proj) -> any()

+

Write a projection Proj of type ProjType.

+ +

write_projection/4

+
+

write_projection(PidSpec, ProjType, Proj, Timeout) -> any()

+

Write a projection Proj of type ProjType.

+
+ + +

Generated by EDoc, Apr 8 2015, 17:31:11.

+ + diff --git a/edoc/machi_sequencer.html b/edoc/machi_sequencer.html new file mode 100644 index 0000000..0956867 --- /dev/null +++ b/edoc/machi_sequencer.html @@ -0,0 +1,23 @@ + + + + +Module machi_sequencer + + + + +
+ +

Module machi_sequencer

+"Mothballed" sequencer code, perhaps to be reused sometime in + the future?. + + +

Description

"Mothballed" sequencer code, perhaps to be reused sometime in + the future?
+ + +

Generated by EDoc, Apr 8 2015, 17:31:11.

+ + diff --git a/edoc/machi_sup.html b/edoc/machi_sup.html new file mode 100644 index 0000000..aaa08c6 --- /dev/null +++ b/edoc/machi_sup.html @@ -0,0 +1,39 @@ + + + + +Module machi_sup + + + + +
+ +

Module machi_sup

+Top Machi application supervisor. + +

Behaviours: supervisor.

+ +

Description

Top Machi application supervisor. +

Function Index

+ + +
init/1
start_link/0
+ +

Function Details

+ +

init/1

+
+

init(X1) -> any()

+
+ +

start_link/0

+
+

start_link() -> any()

+
+
+ + +

Generated by EDoc, Apr 8 2015, 17:31:11.

+ + diff --git a/edoc/machi_util.html b/edoc/machi_util.html new file mode 100644 index 0000000..f85a247 --- /dev/null +++ b/edoc/machi_util.html @@ -0,0 +1,150 @@ + + + + +Module machi_util + + + + +
+ +

Module machi_util

+Miscellaneous utility functions. + + +

Description

Miscellaneous utility functions. +

Function Index

+ + + + + + + + + + + + + + + + + + + +
bin_to_hexstr/1Convert a binary() to a hexadecimal string.
checksum_chunk/1Calculate a checksum for a chunk of file data.
connect/2Create a TCP connection to a remote Machi server.
connect/3Create a TCP connection to a remote Machi server.
hexstr_to_bin/1Convert a hexadecimal string to a binary().
hexstr_to_int/1Convert a hexadecimal string to an integer.
increment_max_filenum/2Increase the file size of a config file, which is used as the + basis for a minimum sequence number.
info_msg/2Log an 'info' level message.
int_to_hexbin/2Convert an integer into a hexadecimal string (in binary() + form) whose length is based on I_size.
int_to_hexstr/2Convert an integer into a hexadecimal string whose length is + based on I_size.
make_binary/1Convert a compatible Erlang data type into a binary() equivalent.
make_checksum_filename/2Calculate a checksum file path, by common convention.
make_data_filename/2Calculate a file data file path, by common convention.
make_projection_filename/2Calculate a projection store file path, by common convention.
make_regname/1Create a registered name atom for FLU sequencer internal + rendezvous/message passing use.
make_string/1Convert a compatible Erlang data type into a string() equivalent.
read_max_filenum/2Read the file size of a config file, which is used as the + basis for a minimum sequence number.
verb/1Log a verbose message.
verb/2Log a verbose message.
+ +

Function Details

+ +

bin_to_hexstr/1

+
+

bin_to_hexstr(X1) -> any()

+

Convert a binary() to a hexadecimal string.

+ +

checksum_chunk/1

+
+

checksum_chunk(Chunk) -> any()

+

Calculate a checksum for a chunk of file data.

+ +

connect/2

+
+

connect(Host::inet:ip_address() | inet:hostname(), Port::inet:port_number()) -> port()

+

Create a TCP connection to a remote Machi server.

+ +

connect/3

+
+

connect(Host::inet:ip_address() | inet:hostname(), Port::inet:port_number(), Timeout::timeout()) -> port()

+

Create a TCP connection to a remote Machi server.

+ +

hexstr_to_bin/1

+
+

hexstr_to_bin(S) -> any()

+

Convert a hexadecimal string to a binary().

+ +

hexstr_to_int/1

+
+

hexstr_to_int(X) -> any()

+

Convert a hexadecimal string to an integer.

+ +

increment_max_filenum/2

+
+

increment_max_filenum(DataDir, Prefix) -> any()

+

Increase the file size of a config file, which is used as the + basis for a minimum sequence number.
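A small sketch of how this pairs with read_max_filenum/2 (directory and prefix are illustrative; the increment's return value is not shown in this patch):

    %% Each increment grows the config file; its size is then read back
    %% and used as a floor for newly assigned sequence numbers.
    _ = machi_util:increment_max_filenum("./data/flu_a", <<"pre">>),
    MinSeq = machi_util:read_max_filenum("./data/flu_a", <<"pre">>).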

+ +

info_msg/2

+
+

info_msg(Fmt, Args) -> any()

+

Log an 'info' level message.

+ +

int_to_hexbin/2

+
+

int_to_hexbin(I, I_size) -> any()

+

Convert an integer into a hexadecimal string (in binary() + form) whose length is based on I_size.

+ +

int_to_hexstr/2

+
+

int_to_hexstr(I, I_size) -> any()

+

Convert an integer into a hexadecimal string whose length is + based on I_size.
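For instance, assuming I_size is a bit width (as the int_to_hexbin(Offset, 64) calls elsewhere in this patch suggest):

    %% 255 rendered as a 16-bit quantity yields four hex digits.
    "00ff" = machi_util:int_to_hexstr(255, 16).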

+ +

make_binary/1

+
+

make_binary(X) -> any()

+

Convert a compatible Erlang data type into a binary() equivalent.

+ +

make_checksum_filename/2

+
+

make_checksum_filename(DataDir, FileName) -> any()

+

Calculate a checksum file path, by common convention.

+ +

make_data_filename/2

+
+

make_data_filename(DataDir, File) -> any()

+

Calculate a file data file path, by common convention.

+ +

make_projection_filename/2

+
+

make_projection_filename(DataDir, File) -> any()

+

Calculate a projection store file path, by common convention.

+ +

make_regname/1

+
+

make_regname(Prefix) -> any()

+

Create a registered name atom for FLU sequencer internal + rendezvous/message passing use.

+ +

make_string/1

+
+

make_string(X) -> any()

+

Convert a compatible Erlang data type into a string() equivalent.

+ +

read_max_filenum/2

+
+

read_max_filenum(DataDir, Prefix) -> any()

+

Read the file size of a config file, which is used as the + basis for a minimum sequence number.

+ +

verb/1

+
+

verb(Fmt) -> any()

+

Log a verbose message.

+ +

verb/2

+
+

verb(Fmt, Args) -> any()

+

Log a verbose message.

+
+ + +

Generated by EDoc, Apr 8 2015, 17:31:11.

+ + diff --git a/edoc/modules-frame.html b/edoc/modules-frame.html new file mode 100644 index 0000000..fbe4efc --- /dev/null +++ b/edoc/modules-frame.html @@ -0,0 +1,24 @@ + + + +The machi application + + + +

Modules

+ + + + + + + + + + + + + +
machi_admin_util
machi_app
machi_chain_manager1
machi_chash
machi_flu1
machi_flu1_client
machi_flu_sup
machi_projection
machi_projection_store
machi_proxy_flu1_client
machi_sequencer
machi_sup
machi_util
+ + \ No newline at end of file diff --git a/edoc/overview-summary.html b/edoc/overview-summary.html new file mode 100644 index 0000000..a29913c --- /dev/null +++ b/edoc/overview-summary.html @@ -0,0 +1,185 @@ + + + + +Machi: a small village of replicated files + + + + + +

Machi: a small village of replicated files +

+ + +

About This EDoc Documentation

+ +

This EDoc-style documentation will concern itself only with Erlang +function APIs and function & data types. Higher-level design and +commentary will remain outside of the Erlang EDoc system; please see +the "Pointers to Other Machi Documentation" section below for more +details.

+ +

Readers should beware that this documentation may be out-of-sync with +the source code. When in doubt, use the make edoc command to +regenerate all HTML pages.

+ +

It is the developer's responsibility to re-generate the documentation +periodically and commit it to the Git repo.

+ +

Machi Code Overview

+ +

Chain Manager

+ +

The Chain Manager is responsible for managing the state of Machi's +"Chain Replication" state. This role is roughly analogous to the +"Riak Core" application inside of Riak, which takes care of +coordinating replica placement and replica repair.

+ +

For each primitive data server in the cluster, a Machi FLU, there is a +Chain Manager process that manages its FLU's role within the Machi +cluster's Chain Replication scheme. Each Chain Manager process +executes locally and independently to manage the distributed state of +a single Machi Chain Replication chain.

+ +
    + +
  • To contrast with Riak Core ... Riak Core's claimant process is + solely responsible for managing certain critical aspects of + Riak Core distributed state. Machi's Chain Manager process + performs similar tasks as Riak Core's claimant. However, Machi + has several active Chain Manager processes, one per FLU server, + instead of a single active process like Core's claimant. Each + Chain Manager process acts independently; each is constrained + so that it will reach consensus via independent computation + & action.

    + + Full discussion of this distributed consensus is outside the + scope of this document; see the "Pointers to Other Machi + Documentation" section below for more information. +
  • +
  • Machi differs from a Riak Core application because Machi's + replica placement policy is simply, "All Machi servers store + replicas of all Machi files". + Machi is intended to be a primitive building block for creating larger + cluster-of-clusters where files are + distributed/fragmented/sharded across a large pool of + independent Machi clusters. +
  • +
  • See + https://www.usenix.org/legacy/events/osdi04/tech/renesse.html + for a copy of the paper, "Chain Replication for Supporting High + Throughput and Availability" by Robbert van Renesse and Fred + B. Schneider. +
  • +
+ +

FLU

+ +

The FLU is the basic storage server for Machi.

+ +
    +
  • The name FLU is taken from "flash storage unit" from the paper + "CORFU: A Shared Log Design for Flash Clusters" by + Balakrishnan, Malkhi, Prabhakaran, and Wobber. See + https://www.usenix.org/conference/nsdi12/technical-sessions/presentation/balakrishnan +
  • +
  • In CORFU, the sequencer step is a prerequisite step that is + performed by a separate component, the Sequencer. + In Machi, the append_chunk() protocol message has + an implicit "sequencer" operation applied by the "head" of the + Machi Chain Replication chain. If a client wishes to write + data that has already been assigned a sequencer position, then + the write_chunk() API function is used (see the sketch after this list). +
  • +
+ +
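Here is the sketch referenced above, contrasting the two write paths. The machi_flu1_client arities are inferred from the internal append_chunk2/4 and write_chunk2/5 functions elsewhere in this patch, the host/port values are illustrative, and all reply shapes are assumptions.

    %% Sockets from machi_util:connect/2; one to the chain head, one
    %% (hypothetically) to another chain member.
    HeadSock = machi_util:connect("localhost", 32900),
    MidSock = machi_util:connect("localhost", 32901),
    Chunk = <<"Hello, world!">>,
    {ok, EpochID} = machi_flu1_client:get_latest_epoch(HeadSock, public),
    %% The chain head runs the implicit sequencer and assigns File + Offset
    %% (reply shape assumed).
    {ok, {Offset, File}} = machi_flu1_client:append_chunk(HeadSock, EpochID,
                                                          <<"pre">>, Chunk),
    %% The already-assigned position can then be written directly on another
    %% chain member, with no sequencer step (write_chunk/5 arity assumed).
    ok = machi_flu1_client:write_chunk(MidSock, EpochID, File, Offset, Chunk).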

For each FLU, there are three independent tasks that are implemented +using three different Erlang processes:

+ +
    +
  • A FLU server, implemented primarily by machi_flu.erl. +
  • +
  • A projection store server, implemented primarily by + machi_projection_store.erl. +
  • +
  • A chain state manager server, implemented primarily by + machi_chain_manager1.erl. +
  • +
+ +

From the perspective of failure detection, it is very convenient that +all three FLU-related services (file server, sequencer server, and +projection server) are accessed using the same single TCP port.

+ +

Projection (data structure)

+ +

The projection is a data structure that specifies the current state +of the Machi cluster: all FLUs, which FLUs are considered +up/running or down/crashed/stopped, which FLUs are active +participants in the Chain Replication protocol, and which FLUs are +under "repair" (i.e., having their data resynchronized when +newly-added to a cluster or when restarting after a crash).
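A small sketch of building such a projection with machi_projection:new/6 (argument order taken from this patch); the FLU names are illustrative:

    %% A 3-FLU cluster: 'a' is the only active chain member, 'b' is being
    %% repaired, and 'c' is down.
    P = machi_projection:new(a, [a,b,c], [a], [c], [b], []),
    %% Proplist-style summary of the record, handy for logging.
    io:format("~p\n", [machi_projection:make_projection_summary(P)]).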

+ +

Projection Store (server)

+ +

The projection store is a storage service that is implemented by an +Erlang/OTP gen_server process that is associated with each +FLU. Conceptually, the projection store is an array of +write-once registers. For each projection store register, the +key is a 2-tuple of an epoch number (non_neg_integer() type) +and a projection type (public or private type); the value is +a projection data structure (projection_v1() type).

+ +

Client and Proxy Client

+ +

Machi is intentionally avoiding using distributed Erlang for Machi's +communication. This design decision makes Erlang-side code more +difficult & complex, but it allows us the freedom to implement +parts of Machi in other languages without major +protocol & API & glue code changes later in the product's +lifetime.

+ +

There are two layers of interface for Machi clients.

+ +
    +
  • The machi_flu1_client module implements an API that uses a + TCP socket directly. +
  • +
  • The machi_proxy_flu1_client module implements an API that + uses a local, long-lived gen_server process as a proxy for + the remote, perhaps disconnected-or-crashed Machi FLU server. +
  • +
+ +

The types for both modules ought to be the same. However, due to +rapid code churn, some differences might exist. Any major difference +is (almost by definition) a bug: please open a GitHub issue to request +a correction.
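For example, fetching the latest public epoch through either layer should yield the same answer; in this sketch the host, port, and #p_srvr{} field names are assumptions:

    %% Layer 1: raw TCP socket, managed by the caller.
    Sock = machi_util:connect("localhost", 32900),
    {ok, EpochID1} = machi_flu1_client:get_latest_epoch(Sock, public),
    %% Layer 2: a long-lived proxy process owns (and re-opens) the socket.
    {ok, Proxy} = machi_proxy_flu1_client:start_link(
                      #p_srvr{name=a, address="localhost", port=32900}),
    {ok, EpochID2} = machi_proxy_flu1_client:get_latest_epoch(Proxy, public),
    true = (EpochID1 =:= EpochID2).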

+ +

TODO notes

+ +

Any use of the string "TODO" in upper/lower/mixed case, anywhere in +the code, is a reminder signal of unfinished work.

+ +

Pointers to Other Machi Documentation

+ +
    +
  • If you are viewing this document locally, please look in the + ../doc/ directory, +
  • +
  • If you are viewing this document via the Web, please find the + documentation via this link: + http://github.com/basho/machi/tree/master/doc/ + Please be aware that this link points to the master branch + of the Machi source repository and therefore may be + out-of-sync with non-master branch code. +
  • + +
+ +
+ +

Generated by EDoc, Apr 8 2015, 17:31:11.

+ + diff --git a/edoc/overview.edoc b/edoc/overview.edoc new file mode 100644 index 0000000..04cf4cc --- /dev/null +++ b/edoc/overview.edoc @@ -0,0 +1,14 @@ + +@title Machi: a small village of replicated files + +@doc + +Documentation for Machi is an ongoing challenge. Much of the +high-level design & commentary are outside of the Erlang EDoc system + +Zoom2 zoom zoom zoom boom boom boom boom + +Rumba tango Rumba tango Rumba tango Rumba tango Rumba tango Rumba +tango Rumba tango Rumba tango Rumba tango Rumba tango Rumba tango +Rumba tango Rumba tango Rumba tango Rumba tango Rumba tango Rumba +tango Rumba tango Rumba tango Rumba tango Rumba tango diff --git a/edoc/packages-frame.html b/edoc/packages-frame.html new file mode 100644 index 0000000..189d01c --- /dev/null +++ b/edoc/packages-frame.html @@ -0,0 +1,11 @@ + + + +The machi application + + + +

Packages

+
+ + \ No newline at end of file diff --git a/edoc/stylesheet.css b/edoc/stylesheet.css new file mode 100644 index 0000000..e426a90 --- /dev/null +++ b/edoc/stylesheet.css @@ -0,0 +1,55 @@ +/* standard EDoc style sheet */ +body { + font-family: Verdana, Arial, Helvetica, sans-serif; + margin-left: .25in; + margin-right: .2in; + margin-top: 0.2in; + margin-bottom: 0.2in; + color: #000000; + background-color: #ffffff; +} +h1,h2 { + margin-left: -0.2in; +} +div.navbar { + background-color: #add8e6; + padding: 0.2em; +} +h2.indextitle { + padding: 0.4em; + background-color: #add8e6; +} +h3.function,h3.typedecl { + background-color: #add8e6; + padding-left: 1em; +} +div.spec { + margin-left: 2em; + background-color: #eeeeee; +} +a.module,a.package { + text-decoration:none +} +a.module:hover,a.package:hover { + background-color: #eeeeee; +} +ul.definitions { + list-style-type: none; +} +ul.index { + list-style-type: none; + background-color: #eeeeee; +} + +/* + * Minor style tweaks + */ +ul { + list-style-type: square; +} +table { + border-collapse: collapse; +} +td { + padding: 3 +} diff --git a/rebar.config b/rebar.config index 5b3cfa2..afb0283 100644 --- a/rebar.config +++ b/rebar.config @@ -1,5 +1,6 @@ %%% {erl_opts, [warnings_as_errors, {parse_transform, lager_transform}, debug_info]}. {erl_opts, [{parse_transform, lager_transform}, debug_info]}. +{edoc_opts, [{dir, "./edoc"}]}. {deps, [ {lager, ".*", {git, "git://github.com/basho/lager.git", {tag, "2.0.1"}}} diff --git a/src/machi_admin_util.erl b/src/machi_admin_util.erl index 990d948..f0db9d0 100644 --- a/src/machi_admin_util.erl +++ b/src/machi_admin_util.erl @@ -18,6 +18,8 @@ %% %% ------------------------------------------------------------------- +%% @doc Machi chain replication administration utilities. + -module(machi_admin_util). %% TODO Move these types to a common header file? (also machi_flu1_client.erl?) @@ -114,7 +116,7 @@ verify_chunk_checksum(File, ReadChunk) -> fun({Offset, Size, CSum}, Acc) -> case ReadChunk(File, Offset, Size) of {ok, Chunk} -> - CSum2 = machi_util:checksum(Chunk), + CSum2 = machi_util:checksum_chunk(Chunk), if CSum == CSum2 -> Acc; true -> diff --git a/src/machi_app.erl b/src/machi_app.erl index 6dfddf7..2701f60 100644 --- a/src/machi_app.erl +++ b/src/machi_app.erl @@ -18,6 +18,8 @@ %% %% ------------------------------------------------------------------- +%% @doc Top-level supervisor for the Machi application. + -module(machi_app). -behaviour(application). diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index c554621..ef81558 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -19,6 +19,28 @@ %% under the License. %% %% ------------------------------------------------------------------- + +%% @doc The Machi chain manager, Guardian of all things related to +%% Chain Replication state, status, and data replica safety. +%% +%% The Chain Manager is responsible for managing the state of Machi's +%% "Chain Replication" state. This role is roughly analogous to the +%% "Riak Core" application inside of Riak, which takes care of +%% coordinating replica placement and replica repair. +%% +%% For each primitive data server in the cluster, a Machi FLU, there +%% is a Chain Manager process that manages its FLU's role within the +%% Machi cluster's Chain Replication scheme. Each Chain Manager +%% process executes locally and independently to manage the +%% distributed state of a single Machi Chain Replication chain. 
+%% +%% Machi's Chain Manager process performs similar tasks as Riak Core's +%% claimant. However, Machi has several active Chain Manager +%% processes, one per FLU server, instead of a single active process +%% like Core's claimant. Each Chain Manager process acts +%% independently; each is constrained so that it will reach consensus +%% via independent computation & action. + -module(machi_chain_manager1). %% TODO: I am going to sever the connection between the flowchart and the diff --git a/src/machi_chash.erl b/src/machi_chash.erl index f45473a..6ad46f3 100644 --- a/src/machi_chash.erl +++ b/src/machi_chash.erl @@ -16,9 +16,13 @@ %%% %%%------------------------------------------------------------------- -%% Consistent hashing library. Also known as "random slicing". -%% Originally from the Hibari DB source code at https://github.com/hibari +%% @doc Consistent hashing library. Also known as "random slicing". %% +%% This code was originally from the Hibari DB source code at +%% [https://github.com/hibari] + +-module(machi_chash). + %% TODO items: %% %% 1. Refactor to use bigints instead of floating point numbers. The @@ -26,8 +30,6 @@ %% much wiggle-room for making really small hashing range %% definitions. --module(machi_chash). - -define(SMALLEST_SIGNIFICANT_FLOAT_SIZE, 0.1e-12). -define(SHA_MAX, (1 bsl (20*8))). diff --git a/src/machi_flu1.erl b/src/machi_flu1.erl index bd34ff5..cb2c5fc 100644 --- a/src/machi_flu1.erl +++ b/src/machi_flu1.erl @@ -18,6 +18,33 @@ %% %% ------------------------------------------------------------------- +%% @doc The Machi FLU file server + file location sequencer. +%% +%% This module implements only the Machi FLU file server and its +%% implicit sequencer. +%% Please see the EDoc "Overview" for details about the FLU as a +%% primitive file server process vs. the larger Machi design of a FLU +%% as a sequencer + file server + chain manager group of processes. +%% +%% For the moment, this module also implements a rudimentary TCP-based +%% protocol as the sole supported access method to the server, +%% sequencer, and projection store. Conceptually, those three +%% services are independent and ought to have their own protocols. As +%% a practical matter, there is no need for wire protocol +%% compatibility. Furthermore, from the perspective of failure +%% detection, it is very convenient that all three FLU-related +%% services are accessed using the same single TCP port. +%% +%% The FLU is named after the CORFU server "FLU" or "FLash Unit" server. +%% +%% TODO There is one major missing feature in this FLU implementation: +%% there is no "write-once" enforcement for any position in a Machi +%% file. At the moment, we rely on correct behavior of the client +%% & the sequencer to avoid overwriting data. In the Real World, +%% however, all Machi file data is supposed to be exactly write-once +%% to avoid problems with bugs, wire protocol corruption, malicious +%% clients, etc. + -module(machi_flu1). -include_lib("kernel/include/file.hrl"). @@ -218,7 +245,7 @@ do_net_server_append2(RegName, Sock, LenHex, Prefix) -> <> = machi_util:hexstr_to_bin(LenHex), ok = inet:setopts(Sock, [{packet, raw}]), {ok, Chunk} = gen_tcp:recv(Sock, Len, 60*1000), - CSum = machi_util:checksum(Chunk), + CSum = machi_util:checksum_chunk(Chunk), try RegName ! 
{seq_append, self(), Prefix, Chunk, CSum} catch error:badarg -> @@ -300,7 +327,7 @@ do_net_server_write2(Sock, OffsetHex, LenHex, FileBin, DataDir, FHc) -> DoItFun = fun(FHd, Offset, Len) -> ok = inet:setopts(Sock, [{packet, raw}]), {ok, Chunk} = gen_tcp:recv(Sock, Len), - CSum = machi_util:checksum(Chunk), + CSum = machi_util:checksum_chunk(Chunk), case file:pwrite(FHd, Offset, Chunk) of ok -> CSumHex = machi_util:bin_to_hexstr(CSum), diff --git a/src/machi_flu1_client.erl b/src/machi_flu1_client.erl index 570c9fa..d2dac02 100644 --- a/src/machi_flu1_client.erl +++ b/src/machi_flu1_client.erl @@ -18,6 +18,8 @@ %% %% ------------------------------------------------------------------- +%% @doc Erlang API for the Machi FLU TCP protocol version 1. + -module(machi_flu1_client). -include("machi.hrl"). @@ -151,7 +153,7 @@ list_files(Host, TcpPort, EpochID) when is_integer(TcpPort) -> catch gen_tcp:close(Sock) end. -%% @doc Get the latest epoch number from the FLU's projection store. +%% @doc Get the latest epoch number + checksum from the FLU's projection store. -spec get_latest_epoch(port(), projection_type()) -> {ok, epoch_id()} | {error, term()}. @@ -159,7 +161,7 @@ get_latest_epoch(Sock, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> get_latest_epoch2(Sock, ProjType). -%% @doc Get the latest epoch number from the FLU's projection store. +%% @doc Get the latest epoch number + checksum from the FLU's projection store. -spec get_latest_epoch(inet_host(), inet_port(), projection_type()) -> @@ -173,7 +175,7 @@ get_latest_epoch(Host, TcpPort, ProjType) catch gen_tcp:close(Sock) end. -%% @doc Get the latest epoch number from the FLU's projection store. +%% @doc Get the latest projection from the FLU's projection store for `ProjType' -spec read_latest_projection(port(), projection_type()) -> {ok, projection()} | {error, not_written} | {error, term()}. @@ -181,7 +183,7 @@ read_latest_projection(Sock, ProjType) when ProjType == 'public' orelse ProjType == 'private' -> read_latest_projection2(Sock, ProjType). -%% @doc Get the latest epoch number from the FLU's projection store. +%% @doc Get the latest projection from the FLU's projection store for `ProjType' -spec read_latest_projection(inet_host(), inet_port(), projection_type()) -> @@ -368,7 +370,7 @@ append_chunk2(Sock, EpochID, Prefix0, Chunk0) -> erase(bad_sock), try %% TODO: add client-side checksum to the server's protocol - %% _ = crypto:hash(md5, Chunk), + %% _ = machi_util:checksum_chunk(Chunk), Prefix = machi_util:make_binary(Prefix0), Chunk = machi_util:make_binary(Chunk0), Len = iolist_size(Chunk0), @@ -536,7 +538,7 @@ write_chunk2(Sock, EpochID, File0, Offset, Chunk0) -> {EpochNum, EpochCSum} = EpochID, EpochIDRaw = <>, %% TODO: add client-side checksum to the server's protocol - %% _ = crypto:hash(md5, Chunk), + %% _ = machi_util:checksum_chunk(Chunk), File = machi_util:make_binary(File0), true = (Offset >= ?MINIMUM_OFFSET), OffsetHex = machi_util:int_to_hexbin(Offset, 64), diff --git a/src/machi_flu_sup.erl b/src/machi_flu_sup.erl index 4ad26fc..ce29502 100644 --- a/src/machi_flu_sup.erl +++ b/src/machi_flu_sup.erl @@ -18,6 +18,9 @@ %% %% ------------------------------------------------------------------- +%% @doc Supervisor for Machi FLU servers and their related support +%% servers. + -module(machi_flu_sup). -behaviour(supervisor). 
diff --git a/src/machi_projection.erl b/src/machi_projection.erl index d4f7e42..42bfc8a 100644 --- a/src/machi_projection.erl +++ b/src/machi_projection.erl @@ -18,6 +18,8 @@ %% %% ------------------------------------------------------------------- +%% @doc API for manipulating Machi projection data structures (i.e., records). + -module(machi_projection). -include("machi_projection.hrl"). @@ -30,13 +32,19 @@ make_projection_summary/1 ]). +%% @doc Create a new projection record. + new(MyName, All_list, UPI_list, Down_list, Repairing_list, Ps) -> new(0, MyName, All_list, Down_list, UPI_list, Repairing_list, Ps). +%% @doc Create a new projection record. + new(EpochNum, MyName, All_list, Down_list, UPI_list, Repairing_list, Dbg) -> new(EpochNum, MyName, All_list, Down_list, UPI_list, Repairing_list, Dbg, []). +%% @doc Create a new projection record. + new(EpochNum, MyName, All_list0, Down_list, UPI_list, Repairing_list, Dbg, Dbg2) when is_integer(EpochNum), EpochNum >= 0, @@ -87,15 +95,22 @@ new(EpochNum, MyName, All_list0, Down_list, UPI_list, Repairing_list, }, update_projection_dbg2(update_projection_checksum(P), Dbg2). +%% @doc Update the checksum element of a projection record. + update_projection_checksum(P) -> CSum = crypto:hash(sha, term_to_binary(P#projection_v1{epoch_csum= <<>>, dbg2=[]})), P#projection_v1{epoch_csum=CSum}. +%% @doc Update the `dbg2' element of a projection record. + update_projection_dbg2(P, Dbg2) when is_list(Dbg2) -> P#projection_v1{dbg2=Dbg2}. +%% @doc Compare two projection records for equality (assuming that the +%% checksum element has been correctly calculated). + -spec compare(#projection_v1{}, #projection_v1{}) -> integer(). compare(#projection_v1{epoch_number=E1, epoch_csum=C1}, @@ -107,6 +122,8 @@ compare(#projection_v1{epoch_number=E1}, E1 > E2 -> 1 end. +%% @doc Create a proplist-style summary of a projection record. + make_projection_summary(#projection_v1{epoch_number=EpochNum, all_members=_All_list, down=Down_list, diff --git a/src/machi_projection_store.erl b/src/machi_projection_store.erl index 09555d2..f4b9074 100644 --- a/src/machi_projection_store.erl +++ b/src/machi_projection_store.erl @@ -18,6 +18,25 @@ %% %% ------------------------------------------------------------------- +%% @doc The Machi write-once projection store service. +%% +%% This API is gen_server-style message passing, intended for use +%% within a single Erlang node to glue together the projection store +%% server with the node-local process that implements Machi's TCP +%% client access protocol (on the "server side" of the TCP connection). +%% +%% All Machi client access to the projection store SHOULD NOT use this +%% module's API. +%% +%% The projection store is implemented by an Erlang/OTP `gen_server' +%% process that is associated with each FLU. Conceptually, the +%% projection store is an array of write-once registers. For each +%% projection store register, the key is a 2-tuple of an epoch number +%% (`non_neg_integer()' type) and a projection type (`public' or +%% `private' type); the value is a projection data structure +%% (`projection_v1()' type). + + -module(machi_projection_store). -include("machi_projection.hrl"). @@ -48,35 +67,60 @@ max_private_epoch = ?NO_EPOCH :: {-1 | non_neg_integer(), binary()} }). +%% @doc Start a new projection store server. 
+%% +%% The `DataDir' argument should be the same directory as specified +%% for use by our companion FLU data server -- all file system paths +%% used by this server are intended to be stored underneath a common +%% file system parent directory as the FLU data server & sequencer +%% servers. + start_link(RegName, DataDir, NotifyWedgeStateChanges) -> gen_server:start_link({local, RegName}, ?MODULE, [DataDir, NotifyWedgeStateChanges], []). +%% @doc Fetch the latest epoch number + checksum for type `ProjType'. + get_latest_epoch(PidSpec, ProjType) -> get_latest_epoch(PidSpec, ProjType, infinity). +%% @doc Fetch the latest epoch number + checksum for type `ProjType'. +%% projection. + get_latest_epoch(PidSpec, ProjType, Timeout) when ProjType == 'public' orelse ProjType == 'private' -> g_call(PidSpec, {get_latest_epoch, ProjType}, Timeout). +%% @doc Fetch the latest projection record for type `ProjType'. + read_latest_projection(PidSpec, ProjType) -> read_latest_projection(PidSpec, ProjType, infinity). +%% @doc Fetch the latest projection record for type `ProjType'. + read_latest_projection(PidSpec, ProjType, Timeout) when ProjType == 'public' orelse ProjType == 'private' -> g_call(PidSpec, {read_latest_projection, ProjType}, Timeout). +%% @doc Fetch the projection record type `ProjType' for epoch number `Epoch' . + read(PidSpec, ProjType, Epoch) -> read(PidSpec, ProjType, Epoch, infinity). +%% @doc Fetch the projection record type `ProjType' for epoch number `Epoch' . + read(PidSpec, ProjType, Epoch, Timeout) when ProjType == 'public' orelse ProjType == 'private', is_integer(Epoch), Epoch >= 0 -> g_call(PidSpec, {read, ProjType, Epoch}, Timeout). +%% @doc Write the projection record type `ProjType' for epoch number `Epoch' . + write(PidSpec, ProjType, Proj) -> write(PidSpec, ProjType, Proj, infinity). +%% @doc Write the projection record type `ProjType' for epoch number `Epoch' . + write(PidSpec, ProjType, Proj, Timeout) when ProjType == 'public' orelse ProjType == 'private', is_record(Proj, projection_v1), @@ -84,16 +128,24 @@ write(PidSpec, ProjType, Proj, Timeout) Proj#projection_v1.epoch_number >= 0 -> g_call(PidSpec, {write, ProjType, Proj}, Timeout). +%% @doc Fetch all projection records of type `ProjType'. + get_all_projections(PidSpec, ProjType) -> get_all_projections(PidSpec, ProjType, infinity). +%% @doc Fetch all projection records of type `ProjType'. + get_all_projections(PidSpec, ProjType, Timeout) when ProjType == 'public' orelse ProjType == 'private' -> g_call(PidSpec, {get_all_projections, ProjType}, Timeout). +%% @doc Fetch all projection epoch numbers of type `ProjType'. + list_all_projections(PidSpec, ProjType) -> list_all_projections(PidSpec, ProjType, infinity). +%% @doc Fetch all projection epoch numbers of type `ProjType'. + list_all_projections(PidSpec, ProjType, Timeout) when ProjType == 'public' orelse ProjType == 'private' -> g_call(PidSpec, {list_all_projections, ProjType}, Timeout). diff --git a/src/machi_proxy_flu1_client.erl b/src/machi_proxy_flu1_client.erl index 5222fb9..f690c5c 100644 --- a/src/machi_proxy_flu1_client.erl +++ b/src/machi_proxy_flu1_client.erl @@ -18,6 +18,23 @@ %% %% ------------------------------------------------------------------- +%% @doc Erlang API for the Machi FLU TCP protocol version 1, with a +%% proxy-process style API for hiding messy details such as TCP +%% connection/disconnection with the remote Machi server. +%% +%% Machi is intentionally avoiding using distributed Erlang for +%% Machi's communication. 
This design decision makes Erlang-side code +%% more difficult & complex, but it's the price to pay for some +%% language independence. Later in Machi's life cycle, we need to +%% (re-)implement some components in a non-Erlang/BEAM-based language. +%% +%% This module implements a "man in the middle" proxy between the +%% Erlang client and Machi server (which is on the "far side" of a TCP +%% connection to somewhere). This proxy process will always execute +%% on the same Erlang node as the Erlang client that uses it. The +%% proxy is intended to be a stable, long-lived process that survives +%% TCP communication problems with the remote server. + -module(machi_proxy_flu1_client). -behaviour(gen_server). @@ -61,79 +78,128 @@ sock :: 'undefined' | port() }). +%% @doc Start a local, long-lived process that will be our steady +%% & reliable communication proxy with the fickle & flaky +%% remote Machi server. + start_link(#p_srvr{}=I) -> gen_server:start_link(?MODULE, [I], []). +%% @doc Append a chunk (binary- or iolist-style) of data to a file +%% with `Prefix'. + append_chunk(PidSpec, EpochID, Prefix, Chunk) -> append_chunk(PidSpec, EpochID, Prefix, Chunk, infinity). +%% @doc Append a chunk (binary- or iolist-style) of data to a file +%% with `Prefix'. + append_chunk(PidSpec, EpochID, Prefix, Chunk, Timeout) -> gen_server:call(PidSpec, {req, {append_chunk, EpochID, Prefix, Chunk}}, Timeout). +%% @doc Read a chunk of data of size `Size' from `File' at `Offset'. + read_chunk(PidSpec, EpochID, File, Offset, Size) -> read_chunk(PidSpec, EpochID, File, Offset, Size, infinity). +%% @doc Read a chunk of data of size `Size' from `File' at `Offset'. + read_chunk(PidSpec, EpochID, File, Offset, Size, Timeout) -> gen_server:call(PidSpec, {req, {read_chunk, EpochID, File, Offset, Size}}, Timeout). +%% @doc Fetch the list of chunk checksums for `File'. + checksum_list(PidSpec, EpochID, File) -> checksum_list(PidSpec, EpochID, File, infinity). +%% @doc Fetch the list of chunk checksums for `File'. + checksum_list(PidSpec, EpochID, File, Timeout) -> gen_server:call(PidSpec, {req, {checksum_list, EpochID, File}}, Timeout). +%% @doc Fetch the list of all files on the remote FLU. + list_files(PidSpec, EpochID) -> list_files(PidSpec, EpochID, infinity). +%% @doc Fetch the list of all files on the remote FLU. + list_files(PidSpec, EpochID, Timeout) -> gen_server:call(PidSpec, {req, {list_files, EpochID}}, Timeout). +%% @doc Get the latest epoch number + checksum from the FLU's projection store. + get_latest_epoch(PidSpec, ProjType) -> get_latest_epoch(PidSpec, ProjType, infinity). +%% @doc Get the latest epoch number + checksum from the FLU's projection store. + get_latest_epoch(PidSpec, ProjType, Timeout) -> gen_server:call(PidSpec, {req, {get_latest_epoch, ProjType}}, Timeout). +%% @doc Get the latest projection from the FLU's projection store for `ProjType' + read_latest_projection(PidSpec, ProjType) -> read_latest_projection(PidSpec, ProjType, infinity). +%% @doc Get the latest projection from the FLU's projection store for `ProjType' + read_latest_projection(PidSpec, ProjType, Timeout) -> gen_server:call(PidSpec, {req, {read_latest_projection, ProjType}}, Timeout). +%% @doc Read a projection `Proj' of type `ProjType'. + read_projection(PidSpec, ProjType, Epoch) -> read_projection(PidSpec, ProjType, Epoch, infinity). +%% @doc Read a projection `Proj' of type `ProjType'. + read_projection(PidSpec, ProjType, Epoch, Timeout) -> gen_server:call(PidSpec, {req, {read_projection, ProjType, Epoch}}, Timeout). 
+%% @doc Write a projection `Proj' of type `ProjType'. + write_projection(PidSpec, ProjType, Proj) -> write_projection(PidSpec, ProjType, Proj, infinity). +%% @doc Write a projection `Proj' of type `ProjType'. + write_projection(PidSpec, ProjType, Proj, Timeout) -> gen_server:call(PidSpec, {req, {write_projection, ProjType, Proj}}, Timeout). +%% @doc Get all projections from the FLU's projection store. + get_all_projections(PidSpec, ProjType) -> get_all_projections(PidSpec, ProjType, infinity). +%% @doc Get all projections from the FLU's projection store. + get_all_projections(PidSpec, ProjType, Timeout) -> gen_server:call(PidSpec, {req, {get_all_projections, ProjType}}, Timeout). +%% @doc Get all epoch numbers from the FLU's projection store. + list_all_projections(PidSpec, ProjType) -> list_all_projections(PidSpec, ProjType, infinity). +%% @doc Get all epoch numbers from the FLU's projection store. + list_all_projections(PidSpec, ProjType, Timeout) -> gen_server:call(PidSpec, {req, {list_all_projections, ProjType}}, Timeout). +%% @doc Quit & close the connection to remote FLU and stop our +%% proxy process. + quit(PidSpec) -> gen_server:call(PidSpec, quit, infinity). diff --git a/src/machi_sequencer.erl b/src/machi_sequencer.erl index ddd81a5..4d1116d 100644 --- a/src/machi_sequencer.erl +++ b/src/machi_sequencer.erl @@ -18,6 +18,9 @@ %% %% ------------------------------------------------------------------- +%% @doc "Mothballed" sequencer code, perhaps to be reused sometime in +%% the future? + -module(machi_sequencer). -compile(export_all). diff --git a/src/machi_sup.erl b/src/machi_sup.erl index dcaadbe..31fcc9b 100644 --- a/src/machi_sup.erl +++ b/src/machi_sup.erl @@ -18,6 +18,8 @@ %% %% ------------------------------------------------------------------- +%% @doc Top Machi application supervisor. + -module(machi_sup). -behaviour(supervisor). diff --git a/src/machi_util.erl b/src/machi_util.erl index af0ac29..9efbbc0 100644 --- a/src/machi_util.erl +++ b/src/machi_util.erl @@ -18,10 +18,12 @@ %% %% ------------------------------------------------------------------- +%% @doc Miscellaneous utility functions. + -module(machi_util). -export([ - checksum/1, + checksum_chunk/1, hexstr_to_bin/1, bin_to_hexstr/1, hexstr_to_int/1, int_to_hexstr/2, int_to_hexbin/2, make_binary/1, make_string/1, @@ -39,33 +41,34 @@ -include("machi_projection.hrl"). -include_lib("kernel/include/file.hrl"). -append(Server, Prefix, Chunk) when is_binary(Prefix), is_binary(Chunk) -> - CSum = checksum(Chunk), - Server ! {seq_append, self(), Prefix, Chunk, CSum}, - receive - {assignment, Offset, File} -> - {Offset, File} - after 10*1000 -> - bummer - end. +%% @doc Create a registered name atom for FLU sequencer internal +%% rendezvous/message passing use. make_regname(Prefix) when is_binary(Prefix) -> erlang:binary_to_atom(Prefix, latin1); make_regname(Prefix) when is_list(Prefix) -> erlang:list_to_atom(Prefix). +%% @doc Calculate a config file path, by common convention. + make_config_filename(DataDir, Prefix) -> lists:flatten(io_lib:format("~s/config/~s", [DataDir, Prefix])). +%% @doc Calculate a checksum file path, by common convention. + make_checksum_filename(DataDir, Prefix, SequencerName, FileNum) -> lists:flatten(io_lib:format("~s/config/~s.~s.~w.csum", [DataDir, Prefix, SequencerName, FileNum])). +%% @doc Calculate a checksum file path, by common convention. 
+ make_checksum_filename(DataDir, "") -> lists:flatten(io_lib:format("~s/config", [DataDir])); make_checksum_filename(DataDir, FileName) -> lists:flatten(io_lib:format("~s/config/~s.csum", [DataDir, FileName])). +%% @doc Calculate a file data file path, by common convention. + make_data_filename(DataDir, "") -> FullPath = lists:flatten(io_lib:format("~s/data", [DataDir])), {"", FullPath}; @@ -73,17 +76,24 @@ make_data_filename(DataDir, File) -> FullPath = lists:flatten(io_lib:format("~s/data/~s", [DataDir, File])), {File, FullPath}. +%% @doc Calculate a file data file path, by common convention. + make_data_filename(DataDir, Prefix, SequencerName, FileNum) -> File = erlang:iolist_to_binary(io_lib:format("~s.~s.~w", [Prefix, SequencerName, FileNum])), FullPath = lists:flatten(io_lib:format("~s/data/~s", [DataDir, File])), {File, FullPath}. +%% @doc Calculate a projection store file path, by common convention. + make_projection_filename(DataDir, "") -> lists:flatten(io_lib:format("~s/projection", [DataDir])); make_projection_filename(DataDir, File) -> lists:flatten(io_lib:format("~s/projection/~s", [DataDir, File])). +%% @doc Read the file size of a config file, which is used as the +%% basis for a minimum sequence number. + read_max_filenum(DataDir, Prefix) -> case file:read_file_info(make_config_filename(DataDir, Prefix)) of {error, enoent} -> @@ -92,6 +102,9 @@ read_max_filenum(DataDir, Prefix) -> FI#file_info.size end. +%% @doc Increase the file size of a config file, which is used as the +%% basis for a minimum sequence number. + increment_max_filenum(DataDir, Prefix) -> try {ok, FH} = file:open(make_config_filename(DataDir, Prefix), [append]), @@ -103,6 +116,8 @@ increment_max_filenum(DataDir, Prefix) -> {error, Error, erlang:get_stacktrace()} end. +%% @doc Convert a hexadecimal string to a `binary()'. + hexstr_to_bin(S) when is_list(S) -> hexstr_to_bin(S, []); hexstr_to_bin(B) when is_binary(B) -> @@ -114,6 +129,8 @@ hexstr_to_bin([X,Y|T], Acc) -> {ok, [V], []} = io_lib:fread("~16u", [X,Y]), hexstr_to_bin(T, [V | Acc]). +%% @doc Convert a `binary()' to a hexadecimal string. + bin_to_hexstr(<<>>) -> []; bin_to_hexstr(<>) -> @@ -124,40 +141,60 @@ hex_digit(X) when X < 10 -> hex_digit(X) -> X - 10 + $a. +%% @doc Convert a compatible Erlang data type into a `binary()' equivalent. + make_binary(X) when is_binary(X) -> X; make_binary(X) when is_list(X) -> iolist_to_binary(X). +%% @doc Convert a compatible Erlang data type into a `string()' equivalent. + make_string(X) when is_list(X) -> lists:flatten(X); make_string(X) when is_binary(X) -> binary_to_list(X). +%% @doc Convert a hexadecimal string to an integer. + hexstr_to_int(X) -> B = hexstr_to_bin(X), B_size = byte_size(B) * 8, <> = B, I. +%% @doc Convert an integer into a hexadecimal string whose length is +%% based on `I_size'. + int_to_hexstr(I, I_size) -> bin_to_hexstr(<>). +%% @doc Convert an integer into a hexadecimal string (in `binary()' +%% form) whose length is based on `I_size'. + int_to_hexbin(I, I_size) -> list_to_binary(int_to_hexstr(I, I_size)). -checksum(Bin) when is_binary(Bin) -> - crypto:hash(md5, Bin). +%% @doc Calculate a checksum for a chunk of file data. + +checksum_chunk(Chunk) when is_binary(Chunk); is_list(Chunk) -> + crypto:hash(sha, Chunk). + +%% @doc Log a verbose message. verb(Fmt) -> verb(Fmt, []). +%% @doc Log a verbose message. + verb(Fmt, Args) -> case application:get_env(kernel, verbose) of {ok, true} -> io:format(Fmt, Args); _ -> ok end. +%% @doc Log an 'info' level message. 
+ info_msg(Fmt, Args) -> case application:get_env(kernel, verbose) of {ok, false} -> ok; _ -> error_logger:info_msg(Fmt, Args) @@ -165,11 +202,15 @@ info_msg(Fmt, Args) -> %%%%%%%%%%%%%%%%% +%% @doc Create a TCP connection to a remote Machi server. + -spec connect(inet:ip_address() | inet:hostname(), inet:port_number()) -> port(). connect(Host, Port) -> escript_connect(Host, Port, 4500). +%% @doc Create a TCP connection to a remote Machi server. + -spec connect(inet:ip_address() | inet:hostname(), inet:port_number(), timeout()) -> port(). From e0cabf3cb6d97db85197fffb77bb1e592937cfbe Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Wed, 8 Apr 2015 17:58:49 +0900 Subject: [PATCH 07/22] Remove 'edoc' dir: it's moved to gh-pages --- edoc/.gitignore | 1 - edoc/edoc-info | 7 - edoc/erlang.png | Bin 2109 -> 0 bytes edoc/index.html | 17 -- edoc/machi_admin_util.html | 60 ------- edoc/machi_app.html | 39 ----- edoc/machi_chain_manager1.html | 155 ----------------- edoc/machi_chash.html | 171 ------------------ edoc/machi_flu1.html | 63 ------- edoc/machi_flu1_client.html | 278 ------------------------------ edoc/machi_flu_sup.html | 41 ----- edoc/machi_projection.html | 70 -------- edoc/machi_projection_store.html | 163 ------------------ edoc/machi_proxy_flu1_client.html | 222 ------------------------ edoc/machi_sequencer.html | 23 --- edoc/machi_sup.html | 39 ----- edoc/machi_util.html | 150 ---------------- edoc/modules-frame.html | 24 --- edoc/overview-summary.html | 185 -------------------- edoc/overview.edoc | 14 -- edoc/packages-frame.html | 11 -- edoc/stylesheet.css | 55 ------ 22 files changed, 1788 deletions(-) delete mode 100644 edoc/.gitignore delete mode 100644 edoc/edoc-info delete mode 100644 edoc/erlang.png delete mode 100644 edoc/index.html delete mode 100644 edoc/machi_admin_util.html delete mode 100644 edoc/machi_app.html delete mode 100644 edoc/machi_chain_manager1.html delete mode 100644 edoc/machi_chash.html delete mode 100644 edoc/machi_flu1.html delete mode 100644 edoc/machi_flu1_client.html delete mode 100644 edoc/machi_flu_sup.html delete mode 100644 edoc/machi_projection.html delete mode 100644 edoc/machi_projection_store.html delete mode 100644 edoc/machi_proxy_flu1_client.html delete mode 100644 edoc/machi_sequencer.html delete mode 100644 edoc/machi_sup.html delete mode 100644 edoc/machi_util.html delete mode 100644 edoc/modules-frame.html delete mode 100644 edoc/overview-summary.html delete mode 100644 edoc/overview.edoc delete mode 100644 edoc/packages-frame.html delete mode 100644 edoc/stylesheet.css diff --git a/edoc/.gitignore b/edoc/.gitignore deleted file mode 100644 index bcd672a..0000000 --- a/edoc/.gitignore +++ /dev/null @@ -1 +0,0 @@ -tmp.* diff --git a/edoc/edoc-info b/edoc/edoc-info deleted file mode 100644 index f119ed0..0000000 --- a/edoc/edoc-info +++ /dev/null @@ -1,7 +0,0 @@ -%% encoding: UTF-8 -{application,machi}. -{packages,[]}. -{modules,[machi_admin_util,machi_app,machi_chain_manager1,machi_chash, - machi_flu1,machi_flu1_client,machi_flu_sup,machi_projection, - machi_projection_store,machi_proxy_flu1_client,machi_sequencer, - machi_sup,machi_util]}. 
diff --git a/edoc/erlang.png b/edoc/erlang.png deleted file mode 100644 index 987a618e2403af895bfaf8c2f929e3a4f3746659..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2109 zcmV-D2*US?P)rez_nr%N ze)-p~%6|a|LA_bA=l=$|3jjqS$tjbGG?@TN0w$Azq7Z{YeQxKcpLO55vno1^u23DP&V=i9-KAAsU*ECy^#OtaDC!lVSo!+|-%T+LhTHP^Oqwx8m)b4r3V28JmV&6M#iG)&0;P`j>XGfomEIEK6wPkhI{{K?3#uAGq$!`N_F)TNX zAvuspF?^;c9h%CPWyTDc_03%r4N8+Yzzo_VSfa!zo_7F6D?<+-+KkHwXiWQR=Mr(9|K@{{xEjfDvAbS9uNCP&{)NNCoC?XA$aRe>R8-> z5N<#S_)$d|EYpJfPC?{`$Y~f4yjH&dxHXIGG8wiaLBD6usC87cg+dd&3WLJd4_TcmEeAOz8R>ikgW(9821 z{34Se09Y?KoG<_Y;DDSoyTk>fUN0YO5)3^Za{&s1JbidC9}56{px+f|K_0;YuL5h} z_9J3y%7ucwM)E4K#=Cn7tCjjRkKjnQuiFcM6{17Jt#5F}7z8~RYqW24xV?kAU6xQN zh+h4|SmO1;TdsVOaOeD*kKf}6I7=6ZNig_rtqV?Ov1HrU(P%Hi#6npSe>%qGaNK1w zW$v+r`r0>#p~AN^8b)#7Yesu(ys(>3SCYb4sF9%A9=kMHrLmzk}E&WPG~Jx z9!r{qo5M184t;<7I`t1AsNjv912EeKkHKtOSl%wbcjFh7L6|G?Q+{?radOvuEW$>1 zoc+c&F+u$^0f}1_2dN&lS#I#p3e&+|YGHlMzRC)%&8TnGt+p*;Oz z`0=D=n|qcN+f@07;QjB@ktLhZ`+qz;(xYDli^Pex&&wwU2V4N-a3b@veqHg2cvCRb zoi=ZerLk!4t5!s3?|ARuWx_4-VCgl|TY2qa@$Dr~5QdiT8?$oPpZhaF5UOZ&x=+I9 zt((`6wBPM((BS{;2lmSB;o%z{>=mg*1k2oLjI=+zcf5$4BIZmkOrjrE z*VY(<@FO?zBVDc+Q~Lh;LnlYodZ$J3tmWJBN4j~wVOWelzexhft2nY6A3PZAcm!q} z931CL#1Ki6;HM{agTbKF>3(R-yuF1&Apn3Nh@PGvv)K$mkVqu*^z@vaFgQ3kFfg!s z^=f26@{Ny=_w@7x1qHF$bEk5X$)wR}0s{l>V!TCGM=R5Ei1Ll8u7Z*N0G1CPgB zyLPP|0H{-FRUDJv`Ea=9fX zC63D4+FBlumz$eAJv~j5q*|@^_xC?_>XiL0K@bH61$;i=&CLx(QGb8`8#iu{BnjJW zHUvSgUcK7T&~W(h;koN8t5vB~Ha0dgnane1&RA#87dVcaOpEMM)6>)E&YiPZEXBpe zlarHk89g;+G#U#E3hL_W002xT6UTApOeR%UR_5g7q^73!_4PG2Hi|@|ii(Pfi3vIY z0ES^?Mx1IOizO0?e0_a!9483k`PtCk-rm~Unwpw=?b@~O?(WdgP^bMMAYlLg{dIM9 zOy}OcxVTs%k(@q#n$PF+`TXkYYA%;cr_*5ofWcr$PEL-Ai772Db)3`L*|~G)&eqn} zq@*OrbXim`UAiO`3XdK=%H#1=D%HHV>FMbqAtCAM=@!e}C6Cc))ai5zg~H3rYjkup zD=RBMKR+`wv!kN}1^{3fR#a3}RaLcP#}20|H!^bT)~%G3lp{xu!0_{Wr2hW?>({UQ z`T1F`)|D$)*3{IP&1UDKhLn_)sMYHH{QRkzV=$M?#W2idGFh!wf*`b7ZGC-xVPT=c zV1Vs&!otFoN~M>VQ$G_G6}5No-m0pqwzjr;?W@INu~;m#k*%qz(P%VUt#;3zJ^lUt zU0q%G?%kVzvF7cqQmLw|tA~e&XIqun*x2Ug=9-!s48ty7ycil9Di(|7aybkD7#y?%lgQ z9`Ewy%eDpgxlvJ3Cr+GTFc>(F+cg;(8TPc>y?b|jeEgLwR}LLIBoqp1+1c4_HrvO? z$J5g@G&D3gIC$2ITrQ7`iwh4AfA;K|OePZu1oriTVVG1Zl}e@S)~)mK@UU1cI-Ty| z!Gj8gg2UmUD2ibif*{e+(R4bU#bU|j@{Joe^7(uSf+8X!q*7@_M1;L=AqbM3oXp{H nT3T9A6wS=c+_!HZolgHhw9g$%O4Wbp00000NkvXXu0mjf3HKBY diff --git a/edoc/index.html b/edoc/index.html deleted file mode 100644 index be9d1af..0000000 --- a/edoc/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - - -The machi application - - - - - - -<h2>This page uses frames</h2> -<p>Your browser does not accept frames. -<br>You should go to the <a href="overview-summary.html">non-frame version</a> instead. -</p> - - - \ No newline at end of file diff --git a/edoc/machi_admin_util.html b/edoc/machi_admin_util.html deleted file mode 100644 index 744230a..0000000 --- a/edoc/machi_admin_util.html +++ /dev/null @@ -1,60 +0,0 @@ - - - - -Module machi_admin_util - - - - -
- -

Module machi_admin_util

-Machi chain replication administration utilities. - - -

Description

Machi chain replication administration utilities. -

Data Types

- -

inet_host()

-

inet_host() = inet:ip_address() | inet:hostname()

- - -

inet_port()

-

inet_port() = inet:port_number()

- - -

Function Index

- - - - -
verify_file_checksums_local/3
verify_file_checksums_local/4
verify_file_checksums_remote/3
verify_file_checksums_remote/4
- -

Function Details

- -

verify_file_checksums_local/3

-
-

verify_file_checksums_local(Sock1::port(), EpochID::machi_flu1_client:epoch_id(), Path::binary() | list()) -> {ok, [tuple()]} | {error, term()}

-
- -

verify_file_checksums_local/4

-
-

verify_file_checksums_local(Host::inet_host(), TcpPort::inet_port(), EpochID::machi_flu1_client:epoch_id(), Path::binary() | list()) -> {ok, [tuple()]} | {error, term()}

-
- -

verify_file_checksums_remote/3

-
-

verify_file_checksums_remote(Sock1::port(), EpochID::machi_flu1_client:epoch_id(), File::binary() | list()) -> {ok, [tuple()]} | {error, term()}

-
- -

verify_file_checksums_remote/4

-
-

verify_file_checksums_remote(Host::inet_host(), TcpPort::inet_port(), EpochID::machi_flu1_client:epoch_id(), File::binary() | list()) -> {ok, [tuple()]} | {error, term()}

-
-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_app.html b/edoc/machi_app.html deleted file mode 100644 index 8bba636..0000000 --- a/edoc/machi_app.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - -Module machi_app - - - - -
- -

Module machi_app

-Top-level supervisor for the Machi application. - -

Behaviours: application.

- -

Description

Top-level supervisor for the Machi application. -

Function Index

- - -
start/2
stop/1
- -

Function Details

- -

start/2

-
-

start(StartType, StartArgs) -> any()

-
- -

stop/1

-
-

stop(State) -> any()

-
-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_chain_manager1.html b/edoc/machi_chain_manager1.html deleted file mode 100644 index e2d0bec..0000000 --- a/edoc/machi_chain_manager1.html +++ /dev/null @@ -1,155 +0,0 @@ - - - - -Module machi_chain_manager1 - - - - -
- -

Module machi_chain_manager1

-The Machi chain manager, Guardian of all things related to -Chain Replication state, status, and data replica safety. - -

Behaviours: gen_server.

- -

Description

The Machi chain manager, Guardian of all things related to -Chain Replication state, status, and data replica safety.

- -

The Chain Manager is responsible for managing Machi's -"Chain Replication" state. This role is roughly analogous to the -"Riak Core" application inside of Riak, which takes care of -coordinating replica placement and replica repair.

- -

For each primitive data server in the cluster, a Machi FLU, there -is a Chain Manager process that manages its FLU's role within the -Machi cluster's Chain Replication scheme. Each Chain Manager -process executes locally and independently to manage the -distributed state of a single Machi Chain Replication chain.

- - Machi's Chain Manager process performs similar tasks as Riak Core's - claimant. However, Machi has several active Chain Manager - processes, one per FLU server, instead of a single active process - like Core's claimant. Each Chain Manager process acts - independently; each is constrained so that it will reach consensus - via independent computation & action. -
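For a rough illustration of the API indexed below, here is a hedged sketch of starting one chain manager for a three-FLU cluster; the server names, the local-FLU argument, and the empty option list are made-up placeholders rather than values taken from this module:

    %% Hypothetical 3-FLU cluster {a, b, c}; the third argument names this
    %% manager's local FLU.
    {ok, MgrA} = machi_chain_manager1:start_link(a, [a, b, c], a_flu, []),
    pong = machi_chain_manager1:ping(MgrA),
    ok = machi_chain_manager1:stop(MgrA).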

Function Index

- - - - - - - - - - - - - - - - - - -
code_change/3
get_all_hosed/1
handle_call/3
handle_cast/2
handle_info/2
init/1
make_projection_summary/1
ping/1
projection_transitions_are_sane/2
start_link/3
start_link/4
stop/1
terminate/2
test_calc_projection/2
test_calc_proposed_projection/1
test_react_to_env/1
test_read_latest_public_projection/2
test_write_proposed_projection/1
- -

Function Details

- -

code_change/3

-
-

code_change(OldVsn, S, Extra) -> any()

-
- -

get_all_hosed/1

-
-

get_all_hosed(P) -> any()

-
- -

handle_call/3

-
-

handle_call(Call, From, Ch_mgr) -> any()

-
- -

handle_cast/2

-
-

handle_cast(Cast, Ch_mgr) -> any()

-
- -

handle_info/2

-
-

handle_info(Msg, S) -> any()

-
- -

init/1

-
-

init(X1) -> any()

-
- -

make_projection_summary/1

-
-

make_projection_summary(Projection_v1) -> any()

-
- -

ping/1

-
-

ping(Pid) -> any()

-
- -

projection_transitions_are_sane/2

-
-

projection_transitions_are_sane(Ps, RelativeToServer) -> any()

-
- -

start_link/3

-
-

start_link(MyName, All_list, MyFLUPid) -> any()

-
- -

start_link/4

-
-

start_link(MyName, All_list, MyFLUPid, MgrOpts) -> any()

-
- -

stop/1

-
-

stop(Pid) -> any()

-
- -

terminate/2

-
-

terminate(Reason, S) -> any()

-
- -

test_calc_projection/2

-
-

test_calc_projection(Pid, KeepRunenvP) -> any()

-
- -

test_calc_proposed_projection/1

-
-

test_calc_proposed_projection(Pid) -> any()

-
- -

test_react_to_env/1

-
-

test_react_to_env(Pid) -> any()

-
- -

test_read_latest_public_projection/2

-
-

test_read_latest_public_projection(Pid, ReadRepairP) -> any()

-
- -

test_write_proposed_projection/1

-
-

test_write_proposed_projection(Pid) -> any()

-
-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_chash.html b/edoc/machi_chash.html deleted file mode 100644 index 7f2f293..0000000 --- a/edoc/machi_chash.html +++ /dev/null @@ -1,171 +0,0 @@ - - - - -Module machi_chash - - - - -
- -

Module machi_chash

-Consistent hashing library. - - -

Description

Consistent hashing library. Also known as "random slicing".

- - This code was originally from the Hibari DB source code at - https://github.com/hibari -

Data Types

- -

float_map()

-

float_map() = [{owner_name(), float()}]

-

A float map subdivides the unit interval, starting at 0.0, to - partitions that are assigned to various owners. The sum of all - floats must be exactly 1.0 (or close enough for floating point - purposes).

- -

float_tree()

-

abstract datatype: float_tree()

-

We can't use gb_trees:tree() because 'nil' (the empty tree) is - never valid in our case. But teaching Dialyzer that is difficult.

- -

nextfloat_list()

-

nextfloat_list() = [{float(), brick()}]

-

A nextfloat_list - differs from a float_map in two respects: 1) a nextfloat_list contains - tuples with the brick name in the 2nd position, 2) the float() values are - sorted in strictly ascending order, i.e., the float() at position n is greater than the float() at position m whenever n > m. - For example, a nextfloat_list for the float_map example above is - [{0.25, {br1, nd1}}, {0.75, {br2, nd1}}, {1.0, {br3, nd1}}].

- -

owner_int_range()

-

owner_int_range() = {owner_name(), non_neg_integer(), non_neg_integer()}

-

Used when "prettying" a float map.

- -

owner_name()

-

owner_name() = term()

-

Owner for a range on the unit interval. We are agnostic about its - type.

- -

owner_weight()

-

owner_weight() = {owner_name(), weight()}

- - -

owner_weight_list()

-

owner_weight_list() = [owner_weight()]

-

An owner_weight_list is a definition of brick assignments over the - unit interval [0.0, 1.0].  The sum of all floats must be 1.0.  For - example, [{{br1,nd1}, 0.25}, {{br2,nd1}, 0.5}, {{br3,nd1}, 0.25}].

- -

weight()

-

weight() = non_neg_integer()

-

For this library, a weight is an integer which specifies the - capacity of an "owner" relative to other owners.  For example, if - owner A has a weight of 10, and owner B has a weight of 20, - then B will be assigned twice as much of the unit interval as A.
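To make the weight-to-interval mapping concrete, here is a hedged usage sketch that chains the functions indexed below; the owner names and weights are invented, and the exact float values depend on the map-building algorithm:

    %% Owner {br2,nd1} should receive roughly half of the unit interval here.
    Weights = [{{br1,nd1}, 10}, {{br2,nd1}, 20}, {{br3,nd1}, 10}],
    Map  = machi_chash:make_float_map(Weights),
    Tree = machi_chash:make_tree(Map),
    {_Point, _Owner} = machi_chash:hash_binary_via_float_tree(<<"some key">>, Tree),
    {{per_owner, _PerOwner}, {weight_sum, _Sum}} = machi_chash:sum_map_weights(Map).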

- -

Function Index

- - - - - - - - - - - - -
hash_binary_via_float_map/2Query a float map with a binary (inefficient).
hash_binary_via_float_tree/2Query a float tree with a binary.
make_demo_map1/0Create a sample float map.
make_demo_map2/0Create a sample float map.
make_float_map/1Create a float map, based on a basic owner weight list.
make_float_map/2Create a float map, based on an older float map and a new weight -list.
make_tree/1Create a float tree, which is the rapid lookup data structure - for consistent hash queries.
pretty_with_integers/2Make a pretty/human-friendly version of a float map that describes - integer ranges between 1 and Scale.
pretty_with_integers/3Make a pretty/human-friendly version of a float map (based - upon a float map created from OldWeights and NewWeights) that - describes integer ranges between 1 and Scale.
query_tree/2Low-level function for querying a float tree: the (floating - point) point within the unit interval.
sum_map_weights/1Create a human-friendly summary of a float map.
zzz_usage_details/0Various usage examples, see source code below this function - for full details.
- -

Function Details

- -

hash_binary_via_float_map/2

-
-

hash_binary_via_float_map(Key::binary(), Map::float_map()) -> {float(), owner_name()}

-

Query a float map with a binary (inefficient).

- -

hash_binary_via_float_tree/2

-
-

hash_binary_via_float_tree(Key::binary(), Tree::float_tree()) -> {float(), owner_name()}

-

Query a float tree with a binary.

- -

make_demo_map1/0

-
-

make_demo_map1() -> float_map()

-

Create a sample float map.

- -

make_demo_map2/0

-
-

make_demo_map2() -> float_map()

-

Create a sample float map.

- -

make_float_map/1

-
-

make_float_map(NewOwnerWeights::owner_weight_list()) -> float_map()

-

Create a float map, based on a basic owner weight list.

- -

make_float_map/2

-
-

make_float_map(OldFloatMap::float_map(), NewOwnerWeights::owner_weight_list()) -> float_map()

-

Create a float map, based on an older float map and a new weight -list.

- - The weights in the new weight list may be different than (or the - same as) whatever weights were used to make the older float map.

- -

make_tree/1

-
-

make_tree(Map::float_map()) -> float_tree()

-

Create a float tree, which is the rapid lookup data structure - for consistent hash queries.

- -

pretty_with_integers/2

-
-

pretty_with_integers(Map::float_map(), Scale::integer()) -> [owner_int_range()]

-

Make a pretty/human-friendly version of a float map that describes - integer ranges between 1 and Scale.

- -

pretty_with_integers/3

-
-

pretty_with_integers(OldWeights::owner_weight_list(), NewWeights::owner_weight_list(), Scale::integer()) -> [owner_int_range()]

-

Make a pretty/human-friendly version of a float map (based - upon a float map created from OldWeights and NewWeights) that - describes integer ranges between 1 and Scale.

- -

query_tree/2

-
-

query_tree(Val::float(), Tree::float_tree()) -> {float(), owner_name()}

-

Low-level function for querying a float tree: the (floating - point) point within the unit interval.

- -

sum_map_weights/1

-
-

sum_map_weights(Map::float_map()) -> {{per_owner, float_map()}, {weight_sum, float()}}

-

Create a human-friendly summary of a float map.

- - The two parts of the summary are: a per-owner total of the unit - interval range(s) owned by each owner, and a total sum of all - per-owner ranges (which should be 1.0 but is not enforced).

- -

zzz_usage_details/0

-
-

zzz_usage_details() -> any()

-

Various usage examples, see source code below this function - for full details.

-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_flu1.html b/edoc/machi_flu1.html deleted file mode 100644 index 3ce6902..0000000 --- a/edoc/machi_flu1.html +++ /dev/null @@ -1,63 +0,0 @@ - - - - -Module machi_flu1 - - - - -
- -

Module machi_flu1

-The Machi FLU file server + file location sequencer. - - -

Description

The Machi FLU file server + file location sequencer.

- -

This module implements only the Machi FLU file server and its -implicit sequencer. -Please see the EDoc "Overview" for details about the FLU as a -primitive file server process vs. the larger Machi design of a FLU -as a sequencer + file server + chain manager group of processes.

- -

For the moment, this module also implements a rudimentary TCP-based -protocol as the sole supported access method to the server, -sequencer, and projection store. Conceptually, those three -services are independent and ought to have their own protocols. As -a practical matter, there is no need for wire protocol -compatibility. Furthermore, from the perspective of failure -detection, it is very convenient that all three FLU-related -services are accessed using the same single TCP port.

- -

The FLU is named after the CORFU server "FLU" or "FLash Unit" server.

- - TODO There is one major missing feature in this FLU implementation: - there is no "write-once" enforcement for any position in a Machi - file. At the moment, we rely on correct behavior of the client - & the sequencer to avoid overwriting data. In the Real World, - however, all Machi file data is supposed to be exactly write-once - to avoid problems with bugs, wire protocol corruption, malicious - clients, etc. -

Function Index

- - -
start_link/1
stop/1
- -

Function Details

- -

start_link/1

-
-

start_link(Rest) -> any()

-
- -

stop/1

-
-

stop(Pid) -> any()

-
-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_flu1_client.html b/edoc/machi_flu1_client.html deleted file mode 100644 index f1f6203..0000000 --- a/edoc/machi_flu1_client.html +++ /dev/null @@ -1,278 +0,0 @@ - - - - -Module machi_flu1_client - - - - -
- -

Module machi_flu1_client

-Erlang API for the Machi FLU TCP protocol version 1. - - -

Description

Erlang API for the Machi FLU TCP protocol version 1. -

Data Types

- -

chunk()

-

chunk() = binary() | iolist()

-

client can use either

- -

chunk_csum()

-

chunk_csum() = {file_offset(), chunk_size(), binary()}

- - -

chunk_pos()

-

chunk_pos() = {file_offset(), chunk_size(), file_name_s()}

- - -

chunk_s()

-

chunk_s() = binary()

-

server always uses binary()

- -

chunk_size()

-

chunk_size() = non_neg_integer()

- - -

epoch_csum()

-

epoch_csum() = binary()

- - -

epoch_id()

-

epoch_id() = {epoch_num(), epoch_csum()}

- - -

epoch_num()

-

epoch_num() = -1 | non_neg_integer()

- - -

file_info()

-

file_info() = {file_size(), file_name_s()}

- - -

file_name()

-

file_name() = binary() | list()

- - -

file_name_s()

-

file_name_s() = binary()

-

server reply

- -

file_offset()

-

file_offset() = non_neg_integer()

- - -

file_prefix()

-

file_prefix() = binary() | list()

- - -

file_size()

-

file_size() = non_neg_integer()

- - -

inet_host()

-

inet_host() = inet:ip_address() | inet:hostname()

- - -

inet_port()

-

inet_port() = inet:port_number()

- - -

projection()

-

projection() = #projection_v1{}

- - -

projection_type()

-

projection_type() = public | private

- - -

Function Index

- - - - - - - - - - - - - - - - - - - - - - - - - - - -
append_chunk/4Append a chunk (binary- or iolist-style) of data to a file - with Prefix.
append_chunk/5Append a chunk (binary- or iolist-style) of data to a file - with Prefix.
checksum_list/3Fetch the list of chunk checksums for File.
checksum_list/4Fetch the list of chunk checksums for File.
delete_migration/3Restricted API: Delete a file after it has been successfully - migrated.
delete_migration/4Restricted API: Delete a file after it has been successfully - migrated.
get_all_projections/2Get all projections from the FLU's projection store.
get_all_projections/3Get all projections from the FLU's projection store.
get_latest_epoch/2Get the latest epoch number + checksum from the FLU's projection store.
get_latest_epoch/3Get the latest epoch number + checksum from the FLU's projection store.
list_all_projections/2Get all epoch numbers from the FLU's projection store.
list_all_projections/3Get all epoch numbers from the FLU's projection store.
list_files/2Fetch the list of all files on the remote FLU.
list_files/3Fetch the list of all files on the remote FLU.
quit/1Quit & close the connection to remote FLU.
read_chunk/5Read a chunk of data of size Size from File at Offset.
read_chunk/6Read a chunk of data of size Size from File at Offset.
read_latest_projection/2Get the latest projection from the FLU's projection store for ProjType
read_latest_projection/3Get the latest projection from the FLU's projection store for ProjType
read_projection/3Read a projection Proj of type ProjType.
read_projection/4Read a projection Proj of type ProjType.
trunc_hack/3Restricted API: Truncate a file after it has been successfully - erasure coded.
trunc_hack/4Restricted API: Truncate a file after it has been successfully - erasure coded.
write_chunk/5Restricted API: Write a chunk of already-sequenced data to - File at Offset.
write_chunk/6Restricted API: Write a chunk of already-sequenced data to - File at Offset.
write_projection/3Write a projection Proj of type ProjType.
write_projection/4Write a projection Proj of type ProjType.
- -

Function Details

- -

append_chunk/4

-
-

append_chunk(Sock::port(), EpochID::epoch_id(), Prefix::file_prefix(), Chunk::chunk()) -> {ok, chunk_pos()} | {error, term()}

-

Append a chunk (binary- or iolist-style) of data to a file - with Prefix.

- -

append_chunk/5

-
-

append_chunk(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id(), Prefix::file_prefix(), Chunk::chunk()) -> {ok, chunk_pos()} | {error, term()}

-

Append a chunk (binary- or iolist-style) of data to a file - with Prefix.
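A hedged usage sketch of the host/port flavor follows; the host, TCP port, prefix, and the all-zero epoch checksum are placeholders for illustration only:

    %% Append a chunk, then read it back from the position the server chose.
    EpochID = {0, <<0:(20*8)>>},              %% placeholder epoch ID
    {ok, {Offset, Size, File}} =
        machi_flu1_client:append_chunk("localhost", 50000, EpochID,
                                       <<"prefix">>, <<"Hello, world!">>),
    {ok, <<"Hello, world!">>} =
        machi_flu1_client:read_chunk("localhost", 50000, EpochID,
                                     File, Offset, Size).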

- -

checksum_list/3

-
-

checksum_list(Sock::port(), EpochID::epoch_id(), File::file_name()) -> {ok, [chunk_csum()]} | {error, term()}

-

Fetch the list of chunk checksums for File.

- -

checksum_list/4

-
-

checksum_list(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id(), File::file_name()) -> {ok, [chunk_csum()]} | {error, term()}

-

Fetch the list of chunk checksums for File.

- -

delete_migration/3

-
-

delete_migration(Sock::port(), EpochID::epoch_id(), File::file_name()) -> ok | {error, term()}

-

Restricted API: Delete a file after it has been successfully - migrated.

- -

delete_migration/4

-
-

delete_migration(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id(), File::file_name()) -> ok | {error, term()}

-

Restricted API: Delete a file after it has been successfully - migrated.

- -

get_all_projections/2

-
-

get_all_projections(Sock::port(), ProjType::projection_type()) -> {ok, [projection()]} | {error, term()}

-

Get all projections from the FLU's projection store.

- -

get_all_projections/3

-
-

get_all_projections(Host::inet_host(), TcpPort::inet_port(), ProjType::projection_type()) -> {ok, [projection()]} | {error, term()}

-

Get all projections from the FLU's projection store.

- -

get_latest_epoch/2

-
-

get_latest_epoch(Sock::port(), ProjType::projection_type()) -> {ok, epoch_id()} | {error, term()}

-

Get the latest epoch number + checksum from the FLU's projection store.

- -

get_latest_epoch/3

-
-

get_latest_epoch(Host::inet_host(), TcpPort::inet_port(), ProjType::projection_type()) -> {ok, epoch_id()} | {error, term()}

-

Get the latest epoch number + checksum from the FLU's projection store.

- -

list_all_projections/2

-
-

list_all_projections(Sock::port(), ProjType::projection_type()) -> {ok, [non_neg_integer()]} | {error, term()}

-

Get all epoch numbers from the FLU's projection store.

- -

list_all_projections/3

-
-

list_all_projections(Host::inet_host(), TcpPort::inet_port(), ProjType::projection_type()) -> {ok, [non_neg_integer()]} | {error, term()}

-

Get all epoch numbers from the FLU's projection store.

- -

list_files/2

-
-

list_files(Sock::port(), EpochID::epoch_id()) -> {ok, [file_info()]} | {error, term()}

-

Fetch the list of all files on the remote FLU.

- -

list_files/3

-
-

list_files(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id()) -> {ok, [file_info()]} | {error, term()}

-

Fetch the list of all files on the remote FLU.

- -

quit/1

-
-

quit(Sock::port()) -> ok

-

Quit & close the connection to remote FLU.

- -

read_chunk/5

-
-

read_chunk(Sock::port(), EpochID::epoch_id(), File::file_name(), Offset::file_offset(), Size::chunk_size()) -> {ok, chunk_s()} | {error, term()}

-

Read a chunk of data of size Size from File at Offset.

- -

read_chunk/6

-
-

read_chunk(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id(), File::file_name(), Offset::file_offset(), Size::chunk_size()) -> {ok, chunk_s()} | {error, term()}

-

Read a chunk of data of size Size from File at Offset.

- -

read_latest_projection/2

-
-

read_latest_projection(Sock::port(), ProjType::projection_type()) -> {ok, projection()} | {error, not_written} | {error, term()}

-

Get the latest projection from the FLU's projection store for ProjType

- -

read_latest_projection/3

-
-

read_latest_projection(Host::inet_host(), TcpPort::inet_port(), ProjType::projection_type()) -> {ok, projection()} | {error, not_written} | {error, term()}

-

Get the latest projection from the FLU's projection store for ProjType

- -

read_projection/3

-
-

read_projection(Sock::port(), ProjType::projection_type(), Epoch::epoch_num()) -> {ok, projection()} | {error, written} | {error, term()}

-

Read a projection Proj of type ProjType.

- -

read_projection/4

-
-

read_projection(Host::inet_host(), TcpPort::inet_port(), ProjType::projection_type(), Epoch::epoch_num()) -> {ok, projection()} | {error, written} | {error, term()}

-

Read a projection Proj of type ProjType.

- -

trunc_hack/3

-
-

trunc_hack(Sock::port(), EpochID::epoch_id(), File::file_name()) -> ok | {error, term()}

-

Restricted API: Truncate a file after it has been successfully - erasure coded.

- -

trunc_hack/4

-
-

trunc_hack(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id(), File::file_name()) -> ok | {error, term()}

-

Restricted API: Truncate a file after it has been successfully - erasure coded.

- -

write_chunk/5

-
-

write_chunk(Sock::port(), EpochID::epoch_id(), File::file_name(), Offset::file_offset(), Chunk::chunk()) -> ok | {error, term()}

-

Restricted API: Write a chunk of already-sequenced data to - File at Offset.

- -

write_chunk/6

-
-

write_chunk(Host::inet_host(), TcpPort::inet_port(), EpochID::epoch_id(), File::file_name(), Offset::file_offset(), Chunk::chunk()) -> ok | {error, term()}

-

Restricted API: Write a chunk of already-sequenced data to - File at Offset.

- -

write_projection/3

-
-

write_projection(Sock::port(), ProjType::projection_type(), Proj::projection()) -> ok | {error, written} | {error, term()}

-

Write a projection Proj of type ProjType.

- -

write_projection/4

-
-

write_projection(Host::inet_host(), TcpPort::inet_port(), ProjType::projection_type(), Proj::projection()) -> ok | {error, written} | {error, term()}

-

Write a projection Proj of type ProjType.

-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_flu_sup.html b/edoc/machi_flu_sup.html deleted file mode 100644 index 46a3345..0000000 --- a/edoc/machi_flu_sup.html +++ /dev/null @@ -1,41 +0,0 @@ - - - - -Module machi_flu_sup - - - - -
- -

Module machi_flu_sup

-Supervisor for Machi FLU servers and their related support - servers. - -

Behaviours: supervisor.

- -

Description

Supervisor for Machi FLU servers and their related support - servers. -

Function Index

- - -
init/1
start_link/0
- -

Function Details

- -

init/1

-
-

init(X1) -> any()

-
- -

start_link/0

-
-

start_link() -> any()

-
-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_projection.html b/edoc/machi_projection.html deleted file mode 100644 index 59f9229..0000000 --- a/edoc/machi_projection.html +++ /dev/null @@ -1,70 +0,0 @@ - - - - -Module machi_projection - - - - -
- -

Module machi_projection

-API for manipulating Machi projection data structures (i.e., records). - - -

Description

API for manipulating Machi projection data structures (i.e., records). -

Function Index

- - - - - - - -
compare/2Compare two projection records for equality (assuming that the - checksum element has been correctly calculated).
make_projection_summary/1Create a proplist-style summary of a projection record.
new/6Create a new projection record.
new/7Create a new projection record.
new/8Create a new projection record.
update_projection_checksum/1Update the checksum element of a projection record.
update_projection_dbg2/2Update the dbg2 element of a projection record.
- -

Function Details

- -

compare/2

-
-

compare(Projection_v1::#projection_v1{}, Projection_v1::#projection_v1{}) -> integer()

-

Compare two projection records for equality (assuming that the - checksum element has been correctly calculated).

- -

make_projection_summary/1

-
-

make_projection_summary(Projection_v1) -> any()

-

Create a proplist-style summary of a projection record.

- -

new/6

-
-

new(MyName, All_list, UPI_list, Down_list, Repairing_list, Ps) -> any()

-

Create a new projection record.
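For illustration, a hedged sketch that builds a projection with the argument order shown in the spec above and then summarizes it; the server names are invented:

    %% All members are a, b, c; a and b are in the UPI list; c is down.
    P = machi_projection:new(a, [a, b, c], [a, b], [c], [], []),
    _Summary = machi_projection:make_projection_summary(P).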

- -

new/7

-
-

new(EpochNum, MyName, All_list, Down_list, UPI_list, Repairing_list, Dbg) -> any()

-

Create a new projection record.

- -

new/8

-
-

new(EpochNum, MyName, All_list0, Down_list, UPI_list, Repairing_list, Dbg, Dbg2) -> any()

-

Create a new projection record.

- -

update_projection_checksum/1

-
-

update_projection_checksum(P) -> any()

-

Update the checksum element of a projection record.

- -

update_projection_dbg2/2

-
-

update_projection_dbg2(P, Dbg2) -> any()

-

Update the dbg2 element of a projection record.

-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_projection_store.html b/edoc/machi_projection_store.html deleted file mode 100644 index ed25ba3..0000000 --- a/edoc/machi_projection_store.html +++ /dev/null @@ -1,163 +0,0 @@ - - - - -Module machi_projection_store - - - - -
- -

Module machi_projection_store

-The Machi write-once projection store service. - - -

Description

The Machi write-once projection store service.

- -

This API is gen_server-style message passing, intended for use -within a single Erlang node to glue together the projection store -server with the node-local process that implements Machi's TCP -client access protocol (on the "server side" of the TCP connection).

- -

Machi clients SHOULD NOT use this -module's API to access the projection store.

- - The projection store is implemented by an Erlang/OTP gen_server - process that is associated with each FLU. Conceptually, the - projection store is an array of write-once registers. For each - projection store register, the key is a 2-tuple of an epoch number - (non_neg_integer() type) and a projection type (public or - private type); the value is a projection data structure - (projection_v1() type). -
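A minimal sketch of the write-once register model described above; PidSpec and Proj are placeholders (a running store process and a projection record), and the shape of get_latest_epoch/2's reply is assumed to mirror machi_flu1_client:get_latest_epoch/2:

    ok = machi_projection_store:write(PidSpec, public, Proj),
    {ok, {Epoch, _Csum}} = machi_projection_store:get_latest_epoch(PidSpec, public),
    {ok, Proj} = machi_projection_store:read(PidSpec, public, Epoch).
    %% A second write to the same {Epoch, public} register is expected to be
    %% rejected, since each register is write-once.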

Function Index

- - - - - - - - - - - - - - - - - - - -
code_change/3
get_all_projections/2Fetch all projection records of type ProjType.
get_all_projections/3Fetch all projection records of type ProjType.
get_latest_epoch/2Fetch the latest epoch number + checksum for type ProjType.
get_latest_epoch/3Fetch the latest epoch number + checksum for type ProjType.
handle_call/3
handle_cast/2
handle_info/2
init/1
list_all_projections/2Fetch all projection epoch numbers of type ProjType.
list_all_projections/3Fetch all projection epoch numbers of type ProjType.
read/3Fetch the projection record type ProjType for epoch number Epoch .
read/4Fetch the projection record type ProjType for epoch number Epoch .
read_latest_projection/2Fetch the latest projection record for type ProjType.
read_latest_projection/3Fetch the latest projection record for type ProjType.
start_link/3Start a new projection store server.
terminate/2
write/3Write the projection record type ProjType for epoch number Epoch .
write/4Write the projection record type ProjType for epoch number Epoch .
- -

Function Details

- -

code_change/3

-
-

code_change(OldVsn, S, Extra) -> any()

-
- -

get_all_projections/2

-
-

get_all_projections(PidSpec, ProjType) -> any()

-

Fetch all projection records of type ProjType.

- -

get_all_projections/3

-
-

get_all_projections(PidSpec, ProjType, Timeout) -> any()

-

Fetch all projection records of type ProjType.

- -

get_latest_epoch/2

-
-

get_latest_epoch(PidSpec, ProjType) -> any()

-

Fetch the latest epoch number + checksum for type ProjType.

- -

get_latest_epoch/3

-
-

get_latest_epoch(PidSpec, ProjType, Timeout) -> any()

-

Fetch the latest epoch number + checksum for type ProjType.

- -

handle_call/3

-
-

handle_call(Request, From, S) -> any()

-
- -

handle_cast/2

-
-

handle_cast(Msg, S) -> any()

-
- -

handle_info/2

-
-

handle_info(Info, S) -> any()

-
- -

init/1

-
-

init(X1) -> any()

-
- -

list_all_projections/2

-
-

list_all_projections(PidSpec, ProjType) -> any()

-

Fetch all projection epoch numbers of type ProjType.

- -

list_all_projections/3

-
-

list_all_projections(PidSpec, ProjType, Timeout) -> any()

-

Fetch all projection epoch numbers of type ProjType.

- -

read/3

-
-

read(PidSpec, ProjType, Epoch) -> any()

-

Fetch the projection record type ProjType for epoch number Epoch .

- -

read/4

-
-

read(PidSpec, ProjType, Epoch, Timeout) -> any()

-

Fetch the projection record type ProjType for epoch number Epoch .

- -

read_latest_projection/2

-
-

read_latest_projection(PidSpec, ProjType) -> any()

-

Fetch the latest projection record for type ProjType.

- -

read_latest_projection/3

-
-

read_latest_projection(PidSpec, ProjType, Timeout) -> any()

-

Fetch the latest projection record for type ProjType.

- -

start_link/3

-
-

start_link(RegName, DataDir, NotifyWedgeStateChanges) -> any()

-

Start a new projection store server.

- - The DataDir argument should be the same directory as specified - for use by our companion FLU data server -- all file system paths - used by this server are intended to be stored underneath a common - file system parent directory as the FLU data server & sequencer - servers.

- -

terminate/2

-
-

terminate(Reason, S) -> any()

-
- -

write/3

-
-

write(PidSpec, ProjType, Proj) -> any()

-

Write the projection record type ProjType for epoch number Epoch .

- -

write/4

-
-

write(PidSpec, ProjType, Proj, Timeout) -> any()

-

Write the projection record type ProjType for epoch number Epoch .

-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_proxy_flu1_client.html b/edoc/machi_proxy_flu1_client.html deleted file mode 100644 index 4cf46f4..0000000 --- a/edoc/machi_proxy_flu1_client.html +++ /dev/null @@ -1,222 +0,0 @@ - - - - -Module machi_proxy_flu1_client - - - - -
- -

Module machi_proxy_flu1_client

-Erlang API for the Machi FLU TCP protocol version 1, with a -proxy-process style API for hiding messy details such as TCP -connection/disconnection with the remote Machi server. - -

Behaviours: gen_server.

- -

Description

Erlang API for the Machi FLU TCP protocol version 1, with a -proxy-process style API for hiding messy details such as TCP -connection/disconnection with the remote Machi server.

- -

Machi is intentionally avoiding using distributed Erlang for - Machi's communication. This design decision makes Erlang-side code - more difficult & complex, but it's the price to pay for some -language independence. Later in Machi's life cycle, we need to -(re-)implement some components in a non-Erlang/BEAM-based language.

- - This module implements a "man in the middle" proxy between the - Erlang client and Machi server (which is on the "far side" of a TCP - connection to somewhere). This proxy process will always execute - on the same Erlang node as the Erlang client that uses it. The - proxy is intended to be a stable, long-lived process that survives - TCP communication problems with the remote server. -
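A hedged sketch of proxy usage; the #p_srvr{} field values, EpochID, and the port number are illustrative placeholders:

    %% #p_srvr{} is the server-descriptor record from machi_projection.hrl.
    P = #p_srvr{name=a, address="localhost", port=50000},
    {ok, Proxy} = machi_proxy_flu1_client:start_link(P),
    {ok, {_Off, _Sz, _File}} =
        machi_proxy_flu1_client:append_chunk(Proxy, EpochID, <<"prefix">>, <<"data">>),
    _ = machi_proxy_flu1_client:quit(Proxy).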

Function Index

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
append_chunk/4Append a chunk (binary- or iolist-style) of data to a file - with Prefix.
append_chunk/5Append a chunk (binary- or iolist-style) of data to a file - with Prefix.
checksum_list/3Fetch the list of chunk checksums for File.
checksum_list/4Fetch the list of chunk checksums for File.
code_change/3
get_all_projections/2Get all projections from the FLU's projection store.
get_all_projections/3Get all projections from the FLU's projection store.
get_latest_epoch/2Get the latest epoch number + checksum from the FLU's projection store.
get_latest_epoch/3Get the latest epoch number + checksum from the FLU's projection store.
handle_call/3
handle_cast/2
handle_info/2
init/1
list_all_projections/2Get all epoch numbers from the FLU's projection store.
list_all_projections/3Get all epoch numbers from the FLU's projection store.
list_files/2Fetch the list of all files on the remote FLU.
list_files/3Fetch the list of all files on the remote FLU.
quit/1Quit & close the connection to remote FLU and stop our - proxy process.
read_chunk/5Read a chunk of data of size Size from File at Offset.
read_chunk/6Read a chunk of data of size Size from File at Offset.
read_latest_projection/2Get the latest projection from the FLU's projection store for ProjType
read_latest_projection/3Get the latest projection from the FLU's projection store for ProjType
read_projection/3Read a projection Proj of type ProjType.
read_projection/4Read a projection Proj of type ProjType.
start_link/1Start a local, long-lived process that will be our steady - & reliable communication proxy with the fickle & flaky - remote Machi server.
terminate/2
write_projection/3Write a projection Proj of type ProjType.
write_projection/4Write a projection Proj of type ProjType.
- -

Function Details

- -

append_chunk/4

-
-

append_chunk(PidSpec, EpochID, Prefix, Chunk) -> any()

-

Append a chunk (binary- or iolist-style) of data to a file - with Prefix.

- -

append_chunk/5

-
-

append_chunk(PidSpec, EpochID, Prefix, Chunk, Timeout) -> any()

-

Append a chunk (binary- or iolist-style) of data to a file - with Prefix.

- -

checksum_list/3

-
-

checksum_list(PidSpec, EpochID, File) -> any()

-

Fetch the list of chunk checksums for File.

- -

checksum_list/4

-
-

checksum_list(PidSpec, EpochID, File, Timeout) -> any()

-

Fetch the list of chunk checksums for File.

- -

code_change/3

-
-

code_change(OldVsn, S, Extra) -> any()

-
- -

get_all_projections/2

-
-

get_all_projections(PidSpec, ProjType) -> any()

-

Get all projections from the FLU's projection store.

- -

get_all_projections/3

-
-

get_all_projections(PidSpec, ProjType, Timeout) -> any()

-

Get all projections from the FLU's projection store.

- -

get_latest_epoch/2

-
-

get_latest_epoch(PidSpec, ProjType) -> any()

-

Get the latest epoch number + checksum from the FLU's projection store.

- -

get_latest_epoch/3

-
-

get_latest_epoch(PidSpec, ProjType, Timeout) -> any()

-

Get the latest epoch number + checksum from the FLU's projection store.

- -

handle_call/3

-
-

handle_call(Request, From, S) -> any()

-
- -

handle_cast/2

-
-

handle_cast(Msg, S) -> any()

-
- -

handle_info/2

-
-

handle_info(Info, S) -> any()

-
- -

init/1

-
-

init(X1) -> any()

-
- -

list_all_projections/2

-
-

list_all_projections(PidSpec, ProjType) -> any()

-

Get all epoch numbers from the FLU's projection store.

- -

list_all_projections/3

-
-

list_all_projections(PidSpec, ProjType, Timeout) -> any()

-

Get all epoch numbers from the FLU's projection store.

- -

list_files/2

-
-

list_files(PidSpec, EpochID) -> any()

-

Fetch the list of all files on the remote FLU.

- -

list_files/3

-
-

list_files(PidSpec, EpochID, Timeout) -> any()

-

Fetch the list of all files on the remote FLU.

- -

quit/1

-
-

quit(PidSpec) -> any()

-

Quit & close the connection to remote FLU and stop our - proxy process.

- -

read_chunk/5

-
-

read_chunk(PidSpec, EpochID, File, Offset, Size) -> any()

-

Read a chunk of data of size Size from File at Offset.

- -

read_chunk/6

-
-

read_chunk(PidSpec, EpochID, File, Offset, Size, Timeout) -> any()

-

Read a chunk of data of size Size from File at Offset.

- -

read_latest_projection/2

-
-

read_latest_projection(PidSpec, ProjType) -> any()

-

Get the latest projection from the FLU's projection store for ProjType

- -

read_latest_projection/3

-
-

read_latest_projection(PidSpec, ProjType, Timeout) -> any()

-

Get the latest projection from the FLU's projection store for ProjType

- -

read_projection/3

-
-

read_projection(PidSpec, ProjType, Epoch) -> any()

-

Read a projection Proj of type ProjType.

- -

read_projection/4

-
-

read_projection(PidSpec, ProjType, Epoch, Timeout) -> any()

-

Read a projection Proj of type ProjType.

- -

start_link/1

-
-

start_link(P_srvr) -> any()

-

Start a local, long-lived process that will be our steady - & reliable communication proxy with the fickle & flaky - remote Machi server.

- -

terminate/2

-
-

terminate(Reason, S) -> any()

-
- -

write_projection/3

-
-

write_projection(PidSpec, ProjType, Proj) -> any()

-

Write a projection Proj of type ProjType.

- -

write_projection/4

-
-

write_projection(PidSpec, ProjType, Proj, Timeout) -> any()

-

Write a projection Proj of type ProjType.

-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_sequencer.html b/edoc/machi_sequencer.html deleted file mode 100644 index 0956867..0000000 --- a/edoc/machi_sequencer.html +++ /dev/null @@ -1,23 +0,0 @@ - - - - -Module machi_sequencer - - - - -
- -

Module machi_sequencer

-"Mothballed" sequencer code, perhaps to be reused sometime in - the future?. - - -

Description

"Mothballed" sequencer code, perhaps to be reused sometime in - the future?
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_sup.html b/edoc/machi_sup.html deleted file mode 100644 index aaa08c6..0000000 --- a/edoc/machi_sup.html +++ /dev/null @@ -1,39 +0,0 @@ - - - - -Module machi_sup - - - - -
- -

Module machi_sup

-Top Machi application supervisor. - -

Behaviours: supervisor.

- -

Description

Top Machi application supervisor. -

Function Index

- - -
init/1
start_link/0
- -

Function Details

- -

init/1

-
-

init(X1) -> any()

-
- -

start_link/0

-
-

start_link() -> any()

-
-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/machi_util.html b/edoc/machi_util.html deleted file mode 100644 index f85a247..0000000 --- a/edoc/machi_util.html +++ /dev/null @@ -1,150 +0,0 @@ - - - - -Module machi_util - - - - -
- -

Module machi_util

-Miscellaneous utility functions. - - -

Description

Miscellaneous utility functions. -

Function Index

- - - - - - - - - - - - - - - - - - - -
bin_to_hexstr/1Convert a binary() to a hexadecimal string.
checksum_chunk/1Calculate a checksum for a chunk of file data.
connect/2Create a TCP connection to a remote Machi server.
connect/3Create a TCP connection to a remote Machi server.
hexstr_to_bin/1Convert a hexadecimal string to a binary().
hexstr_to_int/1Convert a hexadecimal string to an integer.
increment_max_filenum/2Increase the file size of a config file, which is used as the - basis for a minimum sequence number.
info_msg/2Log an 'info' level message.
int_to_hexbin/2Convert an integer into a hexadecimal string (in binary() - form) whose length is based on I_size.
int_to_hexstr/2Convert an integer into a hexadecimal string whose length is - based on I_size.
make_binary/1Convert a compatible Erlang data type into a binary() equivalent.
make_checksum_filename/2Calculate a checksum file path, by common convention.
make_data_filename/2Calculate a file data file path, by common convention.
make_projection_filename/2Calculate a projection store file path, by common convention.
make_regname/1Create a registered name atom for FLU sequencer internal - rendezvous/message passing use.
make_string/1Convert a compatible Erlang data type into a string() equivalent.
read_max_filenum/2Read the file size of a config file, which is used as the - basis for a minimum sequence number.
verb/1Log a verbose message.
verb/2Log a verbose message.
- -

Function Details

- -

bin_to_hexstr/1

-
-

bin_to_hexstr(X1) -> any()

-

Convert a binary() to a hexadecimal string.

- -

checksum_chunk/1

-
-

checksum_chunk(Chunk) -> any()

-

Calculate a checksum for a chunk of file data.
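A small round-trip sketch (the input chunk is arbitrary; the checksum algorithm is whatever checksum_chunk/1 uses internally, SHA-1 in this version of the code):

    Csum   = machi_util:checksum_chunk(<<"Hello, world!">>),
    HexStr = machi_util:bin_to_hexstr(Csum),
    Csum   = machi_util:hexstr_to_bin(HexStr).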

- -

connect/2

-
-

connect(Host::inet:ip_address() | inet:hostname(), Port::inet:port_number()) -> port()

-

Create a TCP connection to a remote Machi server.

- -

connect/3

-
-

connect(Host::inet:ip_address() | inet:hostname(), Port::inet:port_number(), Timeout::timeout()) -> port()

-

Create a TCP connection to a remote Machi server.

- -

hexstr_to_bin/1

-
-

hexstr_to_bin(S) -> any()

-

Convert a hexadecimal string to a binary().

- -

hexstr_to_int/1

-
-

hexstr_to_int(X) -> any()

-

Convert a hexadecimal string to an integer.

- -

increment_max_filenum/2

-
-

increment_max_filenum(DataDir, Prefix) -> any()

-

Increase the file size of a config file, which is used as the - basis for a minimum sequence number.
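A hedged sketch of how this pairs with read_max_filenum/2; the data directory and prefix are placeholders, and only the monotonic-increase property is assumed:

    N0 = machi_util:read_max_filenum("./data", "myprefix"),
    ok = machi_util:increment_max_filenum("./data", "myprefix"),
    N1 = machi_util:read_max_filenum("./data", "myprefix"),
    true = (N1 > N0).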

- -

info_msg/2

-
-

info_msg(Fmt, Args) -> any()

-

Log an 'info' level message.

- -

int_to_hexbin/2

-
-

int_to_hexbin(I, I_size) -> any()

-

Convert an integer into a hexadecimal string (in binary() - form) whose length is based on I_size.

- -

int_to_hexstr/2

-
-

int_to_hexstr(I, I_size) -> any()

-

Convert an integer into a hexadecimal string whose length is - based on I_size.

- -

make_binary/1

-
-

make_binary(X) -> any()

-

Convert a compatible Erlang data type into a binary() equivalent.

- -

make_checksum_filename/2

-
-

make_checksum_filename(DataDir, FileName) -> any()

-

Calculate a checksum file path, by common convention.

- -

make_data_filename/2

-
-

make_data_filename(DataDir, File) -> any()

-

Calculate a file data file path, by common convention.

- -

make_projection_filename/2

-
-

make_projection_filename(DataDir, File) -> any()

-

Calculate a projection store file path, by common convention.

- -

make_regname/1

-
-

make_regname(Prefix) -> any()

-

Create a registered name atom for FLU sequencer internal - rendezvous/message passing use.

- -

make_string/1

-
-

make_string(X) -> any()

-

Convert a compatible Erlang data type into a string() equivalent.

- -

read_max_filenum/2

-
-

read_max_filenum(DataDir, Prefix) -> any()

-

Read the file size of a config file, which is used as the - basis for a minimum sequence number.

- -

verb/1

-
-

verb(Fmt) -> any()

-

Log a verbose message.

- -

verb/2

-
-

verb(Fmt, Args) -> any()

-

Log a verbose message.

-
- - -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/modules-frame.html b/edoc/modules-frame.html deleted file mode 100644 index fbe4efc..0000000 --- a/edoc/modules-frame.html +++ /dev/null @@ -1,24 +0,0 @@ - - - -The machi application - - - -

Modules

- - - - - - - - - - - - - -
machi_admin_util
machi_app
machi_chain_manager1
machi_chash
machi_flu1
machi_flu1_client
machi_flu_sup
machi_projection
machi_projection_store
machi_proxy_flu1_client
machi_sequencer
machi_sup
machi_util
- - \ No newline at end of file diff --git a/edoc/overview-summary.html b/edoc/overview-summary.html deleted file mode 100644 index a29913c..0000000 --- a/edoc/overview-summary.html +++ /dev/null @@ -1,185 +0,0 @@ - - - - -Machi: a small village of replicated files - - - - - -

Machi: a small village of replicated files -

- - -

About This EDoc Documentation

- -

This EDoc-style documentation will concern itself only with Erlang -function APIs and function & data types. Higher-level design and -commentary will remain outside of the Erlang EDoc system; please see -the "Pointers to Other Machi Documentation" section below for more -details.

- -

Readers should beware that this documentation may be out-of-sync with -the source code. When in doubt, use the make edoc command to -regenerate all HTML pages.

- -

It is the developer's responsibility to re-generate the documentation -periodically and commit it to the Git repo.

- -

Machi Code Overview

- -

Chain Manager

- -

The Chain Manager is responsible for managing Machi's -"Chain Replication" state. This role is roughly analogous to the -"Riak Core" application inside of Riak, which takes care of -coordinating replica placement and replica repair.

- -

For each primitive data server in the cluster, a Machi FLU, there is a -Chain Manager process that manages its FLU's role within the Machi -cluster's Chain Replication scheme. Each Chain Manager process -executes locally and independently to manage the distributed state of -a single Machi Chain Replication chain.

- -
    - -
  • To contrast with Riak Core ... Riak Core's claimant process is - solely responsible for managing certain critical aspects of - Riak Core distributed state. Machi's Chain Manager process - performs similar tasks as Riak Core's claimant. However, Machi - has several active Chain Manager processes, one per FLU server, - instead of a single active process like Core's claimant. Each - Chain Manager process acts independently; each is constrained - so that it will reach consensus via independent computation - & action.

    - - Full discussion of this distributed consensus is outside the - scope of this document; see the "Pointers to Other Machi - Documentation" section below for more information. -
  • -
  • Machi differs from a Riak Core application because Machi's - replica placement policy is simply, "All Machi servers store - replicas of all Machi files". - Machi is intended to be a primitive building block for creating larger - cluster-of-clusters where files are - distributed/fragmented/sharded across a large pool of - independent Machi clusters. -
  • -
  • See - https://www.usenix.org/legacy/events/osdi04/tech/renesse.html - for a copy of the paper, "Chain Replication for Supporting High - Throughput and Availability" by Robbert van Renesse and Fred - B. Schneider. -
  • -
- -

FLU

- -

The FLU is the basic storage server for Machi.

- -
    -
  • The name FLU is taken from "flash storage unit" from the paper - "CORFU: A Shared Log Design for Flash Clusters" by - Balakrishnan, Malkhi, Prabhakaran, and Wobber. See - https://www.usenix.org/conference/nsdi12/technical-sessions/presentation/balakrishnan -
  • -
  • In CORFU, the sequencer step is a prerequisite step that is - performed by a separate component, the Sequencer. - In Machi, the append_chunk() protocol message has - an implicit "sequencer" operation applied by the "head" of the - Machi Chain Replication chain. If a client wishes to write - data that has already been assigned a sequencer position, then - the write_chunk() API function is used. -
  • -
- -

For each FLU, there are three independent tasks that are implemented -using three different Erlang processes:

- -
    -
  • A FLU server, implemented primarily by machi_flu.erl. -
  • -
  • A projection store server, implemented primarily by - machi_projection_store.erl. -
  • -
  • A chain state manager server, implemented primarily by - machi_chain_manager1.erl. -
  • -
- -

From the perspective of failure detection, it is very convenient that -all three FLU-related services (file server, sequencer server, and -projection server) are accessed using the same single TCP port.

- -

Projection (data structure)

- -

The projection is a data structure that specifies the current state -of the Machi cluster: all FLUs, which FLUs are considered -up/running or down/crashed/stopped, which FLUs are active -participants in the Chain Replication protocol, and which FLUs are -under "repair" (i.e., having their data resynchronized when -newly-added to a cluster or when restarting after a crash).

- -

Projection Store (server)

- -

The projection store is a storage service that is implemented by an -Erlang/OTP gen_server process that is associated with each -FLU. Conceptually, the projection store is an array of -write-once registers. For each projection store register, the -key is a 2-tuple of an epoch number (non_neg_integer() type) -and a projection type (public or private type); the value is -a projection data structure (projection_v1() type).

- -

Client and Proxy Client

- -

Machi is intentionally avoiding using distributed Erlang for Machi's -communication. This design decision makes Erlang-side code more -difficult & complex but allows us the freedom of implementing -parts of Machi in other languages without major -protocol&API&glue code changes later in the product's -lifetime.

- -

There are two layers of interface for Machi clients.

- -
    -
  • The machi_flu1_client module implements an API that uses a - TCP socket directly. -
  • -
  • The machi_proxy_flu1_client module implements an API that - uses a local, long-lived gen_server process as a proxy for - the remote, perhaps disconnected-or-crashed Machi FLU server. -
  • -
- -

The types for both modules ought to be the same. However, due to -rapid code churn, some differences might exist. Any major difference -is (almost by definition) a bug: please open a GitHub issue to request -a correction.

- -

TODO notes

- -

Any use of the string "TODO" in upper/lower/mixed case, anywhere in -the code, is a reminder signal of unfinished work.

- -

Pointers to Other Machi Documentation

- -
    -
  • If you are viewing this document locally, please look in the - ../doc/ directory, -
  • -
  • If you are viewing this document via the Web, please find the - documentation via this link: - http://github.com/basho/machi/tree/master/doc/ - Please be aware that this link points to the master branch - of the Machi source repository and therefore may be - out-of-sync with non-master branch code. -
  • - -
- -
- -

Generated by EDoc, Apr 8 2015, 17:31:11.

- - diff --git a/edoc/overview.edoc b/edoc/overview.edoc deleted file mode 100644 index 04cf4cc..0000000 --- a/edoc/overview.edoc +++ /dev/null @@ -1,14 +0,0 @@ - -@title Machi: a small village of replicated files - -@doc - -Documentation for Machi is an ongoing challenge. Much of the -high-level design & commentary are outside of the Erlang EDoc system - -Zoom2 zoom zoom zoom boom boom boom boom - -Rumba tango Rumba tango Rumba tango Rumba tango Rumba tango Rumba -tango Rumba tango Rumba tango Rumba tango Rumba tango Rumba tango -Rumba tango Rumba tango Rumba tango Rumba tango Rumba tango Rumba -tango Rumba tango Rumba tango Rumba tango Rumba tango diff --git a/edoc/packages-frame.html b/edoc/packages-frame.html deleted file mode 100644 index 189d01c..0000000 --- a/edoc/packages-frame.html +++ /dev/null @@ -1,11 +0,0 @@ - - - -The machi application - - - -

Packages

-
- - \ No newline at end of file diff --git a/edoc/stylesheet.css b/edoc/stylesheet.css deleted file mode 100644 index e426a90..0000000 --- a/edoc/stylesheet.css +++ /dev/null @@ -1,55 +0,0 @@ -/* standard EDoc style sheet */ -body { - font-family: Verdana, Arial, Helvetica, sans-serif; - margin-left: .25in; - margin-right: .2in; - margin-top: 0.2in; - margin-bottom: 0.2in; - color: #000000; - background-color: #ffffff; -} -h1,h2 { - margin-left: -0.2in; -} -div.navbar { - background-color: #add8e6; - padding: 0.2em; -} -h2.indextitle { - padding: 0.4em; - background-color: #add8e6; -} -h3.function,h3.typedecl { - background-color: #add8e6; - padding-left: 1em; -} -div.spec { - margin-left: 2em; - background-color: #eeeeee; -} -a.module,a.package { - text-decoration:none -} -a.module:hover,a.package:hover { - background-color: #eeeeee; -} -ul.definitions { - list-style-type: none; -} -ul.index { - list-style-type: none; - background-color: #eeeeee; -} - -/* - * Minor style tweaks - */ -ul { - list-style-type: square; -} -table { - border-collapse: collapse; -} -td { - padding: 3 -} From ad9525c5678164c745f21437dddc8c18dba29a09 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Wed, 8 Apr 2015 18:06:08 +0900 Subject: [PATCH 08/22] Add doc/README.md --- doc/README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 doc/README.md diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 0000000..74849f7 --- /dev/null +++ b/doc/README.md @@ -0,0 +1,12 @@ +## Machi Documentation Overview + +For a Web-browsable version of a snapshot of the source doc "EDoc" +Erlang documentation, please use this link: +[Machi EDoc snapshot](https://basho.github.io/machi/edoc/). + +## Documents in this directory + +* __chain-self-management-sketch.org__ is an introduction to the +self-management algorithm proposed for Machi. This algorithm is +(hoped to be) sufficient for managing the Chain Replication state of a +Machi cluster. From 0b2866d1023690bca9491685364dc91a30023a42 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Wed, 8 Apr 2015 18:39:55 +0900 Subject: [PATCH 09/22] Add -spec statements to machi_util.erl, clean up the fallout --- .gitignore | 5 +--- src/machi_flu1.erl | 11 +++++---- src/machi_util.erl | 59 ++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 57 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 180a370..09dacbc 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,4 @@ deps erl_crash.dump .concrete/DEV_MODE .rebar -doc/edoc-info -doc/erlang.png -doc/*.html -doc/stylesheet.css +edoc diff --git a/src/machi_flu1.erl b/src/machi_flu1.erl index cb2c5fc..459aad3 100644 --- a/src/machi_flu1.erl +++ b/src/machi_flu1.erl @@ -525,11 +525,14 @@ run_seq_append_server2(Prefix, DataDir) -> end. +-spec seq_name_hack() -> string(). +seq_name_hack() -> + lists:flatten(io_lib:format("~.36B~.36B", + [element(3,now()), + list_to_integer(os:getpid())])). 
+ seq_append_server_loop(DataDir, Prefix, FileNum) -> - SequencerNameHack = lists:flatten(io_lib:format( - "~.36B~.36B", - [element(3,now()), - list_to_integer(os:getpid())])), + SequencerNameHack = seq_name_hack(), {File, FullPath} = machi_util:make_data_filename( DataDir, Prefix, SequencerNameHack, FileNum), {ok, FHd} = file:open(FullPath, diff --git a/src/machi_util.erl b/src/machi_util.erl index 9efbbc0..6025c25 100644 --- a/src/machi_util.erl +++ b/src/machi_util.erl @@ -28,7 +28,9 @@ hexstr_to_int/1, int_to_hexstr/2, int_to_hexbin/2, make_binary/1, make_string/1, make_regname/1, - make_checksum_filename/2, make_data_filename/2, + make_config_filename/2, + make_checksum_filename/4, make_checksum_filename/2, + make_data_filename/4, make_data_filename/2, make_projection_filename/2, read_max_filenum/2, increment_max_filenum/2, info_msg/2, verb/1, verb/2, @@ -44,6 +46,8 @@ %% @doc Create a registered name atom for FLU sequencer internal %% rendezvous/message passing use. +-spec make_regname(binary()|list()) -> + atom(). make_regname(Prefix) when is_binary(Prefix) -> erlang:binary_to_atom(Prefix, latin1); make_regname(Prefix) when is_list(Prefix) -> @@ -51,17 +55,23 @@ make_regname(Prefix) when is_list(Prefix) -> %% @doc Calculate a config file path, by common convention. +-spec make_config_filename(string(), string()) -> + string(). make_config_filename(DataDir, Prefix) -> lists:flatten(io_lib:format("~s/config/~s", [DataDir, Prefix])). %% @doc Calculate a checksum file path, by common convention. +-spec make_checksum_filename(string(), string(), atom()|string()|binary(), integer()) -> + string(). make_checksum_filename(DataDir, Prefix, SequencerName, FileNum) -> lists:flatten(io_lib:format("~s/config/~s.~s.~w.csum", [DataDir, Prefix, SequencerName, FileNum])). %% @doc Calculate a checksum file path, by common convention. +-spec make_checksum_filename(string(), [] | string() | binary()) -> + string(). make_checksum_filename(DataDir, "") -> lists:flatten(io_lib:format("~s/config", [DataDir])); make_checksum_filename(DataDir, FileName) -> @@ -69,6 +79,18 @@ make_checksum_filename(DataDir, FileName) -> %% @doc Calculate a file data file path, by common convention. +-spec make_data_filename(string(), string(), atom()|string()|binary(), integer()) -> + {binary(), string()}. +make_data_filename(DataDir, Prefix, SequencerName, FileNum) -> + File = erlang:iolist_to_binary(io_lib:format("~s.~s.~w", + [Prefix, SequencerName, FileNum])), + FullPath = lists:flatten(io_lib:format("~s/data/~s", [DataDir, File])), + {File, FullPath}. + +%% @doc Calculate a file data file path, by common convention. + +-spec make_data_filename(string(), [] | string() | binary()) -> + {binary(), string()}. make_data_filename(DataDir, "") -> FullPath = lists:flatten(io_lib:format("~s/data", [DataDir])), {"", FullPath}; @@ -76,16 +98,10 @@ make_data_filename(DataDir, File) -> FullPath = lists:flatten(io_lib:format("~s/data/~s", [DataDir, File])), {File, FullPath}. -%% @doc Calculate a file data file path, by common convention. - -make_data_filename(DataDir, Prefix, SequencerName, FileNum) -> - File = erlang:iolist_to_binary(io_lib:format("~s.~s.~w", - [Prefix, SequencerName, FileNum])), - FullPath = lists:flatten(io_lib:format("~s/data/~s", [DataDir, File])), - {File, FullPath}. - %% @doc Calculate a projection store file path, by common convention. +-spec make_projection_filename(string(), [] | string()) -> + string(). 
make_projection_filename(DataDir, "") -> lists:flatten(io_lib:format("~s/projection", [DataDir])); make_projection_filename(DataDir, File) -> @@ -94,6 +110,8 @@ make_projection_filename(DataDir, File) -> %% @doc Read the file size of a config file, which is used as the %% basis for a minimum sequence number. +-spec read_max_filenum(string(), string()) -> + non_neg_integer(). read_max_filenum(DataDir, Prefix) -> case file:read_file_info(make_config_filename(DataDir, Prefix)) of {error, enoent} -> @@ -105,6 +123,8 @@ read_max_filenum(DataDir, Prefix) -> %% @doc Increase the file size of a config file, which is used as the %% basis for a minimum sequence number. +-spec increment_max_filenum(string(), string()) -> + ok | {error, term()}. increment_max_filenum(DataDir, Prefix) -> try {ok, FH} = file:open(make_config_filename(DataDir, Prefix), [append]), @@ -113,11 +133,13 @@ increment_max_filenum(DataDir, Prefix) -> ok = file:close(FH) catch error:{badmatch,_}=Error -> - {error, Error, erlang:get_stacktrace()} + {error, {Error, erlang:get_stacktrace()}} end. %% @doc Convert a hexadecimal string to a `binary()'. +-spec hexstr_to_bin(string() | binary()) -> + binary(). hexstr_to_bin(S) when is_list(S) -> hexstr_to_bin(S, []); hexstr_to_bin(B) when is_binary(B) -> @@ -131,6 +153,8 @@ hexstr_to_bin([X,Y|T], Acc) -> %% @doc Convert a `binary()' to a hexadecimal string. +-spec bin_to_hexstr(binary()) -> + string(). bin_to_hexstr(<<>>) -> []; bin_to_hexstr(<>) -> @@ -143,6 +167,8 @@ hex_digit(X) -> %% @doc Convert a compatible Erlang data type into a `binary()' equivalent. +-spec make_binary(binary() | iolist()) -> + binary(). make_binary(X) when is_binary(X) -> X; make_binary(X) when is_list(X) -> @@ -150,6 +176,8 @@ make_binary(X) when is_list(X) -> %% @doc Convert a compatible Erlang data type into a `string()' equivalent. +-spec make_string(binary() | iolist()) -> + string(). make_string(X) when is_list(X) -> lists:flatten(X); make_string(X) when is_binary(X) -> @@ -157,6 +185,8 @@ make_string(X) when is_binary(X) -> %% @doc Convert a hexadecimal string to an integer. +-spec hexstr_to_int(string() | binary()) -> + non_neg_integer(). hexstr_to_int(X) -> B = hexstr_to_bin(X), B_size = byte_size(B) * 8, @@ -166,27 +196,35 @@ hexstr_to_int(X) -> %% @doc Convert an integer into a hexadecimal string whose length is %% based on `I_size'. +-spec int_to_hexstr(non_neg_integer(), non_neg_integer()) -> + string(). int_to_hexstr(I, I_size) -> bin_to_hexstr(<>). %% @doc Convert an integer into a hexadecimal string (in `binary()' %% form) whose length is based on `I_size'. +-spec int_to_hexbin(non_neg_integer(), non_neg_integer()) -> + binary(). int_to_hexbin(I, I_size) -> list_to_binary(int_to_hexstr(I, I_size)). %% @doc Calculate a checksum for a chunk of file data. +-spec checksum_chunk(binary() | iolist()) -> + binary(). checksum_chunk(Chunk) when is_binary(Chunk); is_list(Chunk) -> crypto:hash(sha, Chunk). %% @doc Log a verbose message. +-spec verb(string()) -> term(). verb(Fmt) -> verb(Fmt, []). %% @doc Log a verbose message. +-spec verb(string(), list()) -> term(). verb(Fmt, Args) -> case application:get_env(kernel, verbose) of {ok, true} -> io:format(Fmt, Args); @@ -195,6 +233,7 @@ verb(Fmt, Args) -> %% @doc Log an 'info' level message. +-spec info_msg(string(), list()) -> term(). 
info_msg(Fmt, Args) -> case application:get_env(kernel, verbose) of {ok, false} -> ok; _ -> error_logger:info_msg(Fmt, Args) From ce67fb662a519c0866a3bfba7b3681618aa8739a Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 12:16:58 +0900 Subject: [PATCH 10/22] WIP: more projection refactoring, eunit tests pass for the moment --- include/machi_chain_manager.hrl | 8 +- include/machi_projection.hrl | 26 ++++-- rebar.config | 2 + src/machi_chain_manager1.erl | 18 ++-- src/machi_projection.erl | 68 +++++++++----- src/machi_proxy_flu1_client.erl | 101 --------------------- test/machi_chain_manager1_test.erl | 14 ++- test/machi_flu1_test.erl | 3 +- test/machi_projection_test.erl | 47 ++++++---- test/machi_proxy_flu1_client_test.erl | 125 ++++++++++++++++++++++++++ 10 files changed, 248 insertions(+), 164 deletions(-) create mode 100644 test/machi_proxy_flu1_client_test.erl diff --git a/include/machi_chain_manager.hrl b/include/machi_chain_manager.hrl index 7a100b1..006a4b5 100644 --- a/include/machi_chain_manager.hrl +++ b/include/machi_chain_manager.hrl @@ -18,23 +18,27 @@ %% %% ------------------------------------------------------------------- +-include("machi_projection.hrl"). + -define(NOT_FLAPPING, {0,0,0}). -type projection() :: #projection_v1{}. -record(ch_mgr, { init_finished :: boolean(), + active_p :: boolean(), name :: pv1_server(), proj :: projection(), proj_history :: queue(), myflu :: pid() | atom(), flap_limit :: non_neg_integer(), %% - runenv :: list(), %proplist() - opts :: list(), %proplist() flaps=0 :: integer(), flap_start = ?NOT_FLAPPING :: erlang:now(), + runenv :: list(), %proplist() + opts :: list(), %proplist() + members_dict :: p_srvr_dict(), %% Deprecated ... TODO: remove when old test unit test code is removed proj_proposed :: 'none' | projection() diff --git a/include/machi_projection.hrl b/include/machi_projection.hrl index 2e35aed..ea007c5 100644 --- a/include/machi_projection.hrl +++ b/include/machi_projection.hrl @@ -18,19 +18,33 @@ %% %% ------------------------------------------------------------------- +-ifndef(MACHI_PROJECTION_HRL). +-define(MACHI_PROJECTION_HRL, true). + -type pv1_csum() :: binary(). -type pv1_epoch() :: {pv1_epoch_n(), pv1_csum()}. -type pv1_epoch_n() :: non_neg_integer(). -type pv1_server() :: atom() | binary(). -type pv1_timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}. +-record(p_srvr, { + name :: pv1_server(), + proto = 'ipv4' :: 'ipv4' | 'disterl', % disterl? Hrm. + address :: term(), % Protocol-specific + port :: term(), % Protocol-specific + props = [] :: list() % proplist for other related info + }). + +-type p_srvr() :: #p_srvr{}. +-type p_srvr_dict() :: orddict:orddict(pv1_server(), p_srvr()). + -define(DUMMY_PV1_EPOCH, {0,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>}). -record(projection_v1, { epoch_number :: pv1_epoch_n(), epoch_csum :: pv1_csum(), all_members :: [pv1_server()], - member_dict :: orddict:orddict(), + members_dict :: p_srvr_dict(), down :: [pv1_server()], creation_time :: pv1_timestamp(), author_server :: pv1_server(), @@ -42,12 +56,6 @@ -define(MACHI_DEFAULT_TCP_PORT, 50000). --record(p_srvr, { - name :: pv1_server(), - proto = 'ipv4' :: 'ipv4' | 'disterl', % disterl? Hrm. - address :: term(), % Protocol-specific - port :: term(), % Protocol-specific - props = [] :: list() % proplist for other related info - }). - -define(SHA_MAX, (1 bsl (20*8))). + +-endif. 
% !MACHI_PROJECTION_HRL diff --git a/rebar.config b/rebar.config index afb0283..88f9c3d 100644 --- a/rebar.config +++ b/rebar.config @@ -1,3 +1,5 @@ +{require_otp_vsn, "17"}. + %%% {erl_opts, [warnings_as_errors, {parse_transform, lager_transform}, debug_info]}. {erl_opts, [{parse_transform, lager_transform}, debug_info]}. {edoc_opts, [{dir, "./edoc"}]}. diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index ef81558..83c8b21 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -132,7 +132,7 @@ test_react_to_env(Pid) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -init({MyName, All_list, MyFLUPid, MgrOpts}) -> +init({MyName, All_list, MembersDict, MgrOpts}) -> RunEnv = [%% {seed, Seed}, {seed, now()}, {network_partitions, []}, @@ -144,10 +144,12 @@ init({MyName, All_list, MyFLUPid, MgrOpts}) -> NoneProj = make_initial_projection(MyName, All_list, [], [], []), S = #ch_mgr{init_finished=false, + active_p=proplists:get_value(active_mode, MgrOpts, true), name=MyName, proj=NoneProj, proj_history=queue:new(), - myflu=MyFLUPid, % pid or atom local name + myflu=MyName, + members_dict=MembersDict, %% TODO 2015-03-04: revisit, should this constant be bigger? %% Yes, this should be bigger, but it's a hack. There is %% no guarantee that all parties will advance to a minimum @@ -170,6 +172,10 @@ init({MyName, All_list, MyFLUPid, MgrOpts}) -> self() ! {finish_init, BestProj}, {ok, S}. +handle_call({ping}, _From, S) -> + {reply, pong, S}; +handle_call({stop}, _From, S) -> + {stop, normal, ok, S}; handle_call(_Call, _From, #ch_mgr{init_finished=false} = S) -> {reply, not_initialized, S}; handle_call({test_write_proposed_projection}, _From, S) -> @@ -179,10 +185,6 @@ handle_call({test_write_proposed_projection}, _From, S) -> {Res, S2} = do_cl_write_proposed_proj(S), {reply, Res, S2} end; -handle_call({ping}, _From, S) -> - {reply, pong, S}; -handle_call({stop}, _From, S) -> - {stop, normal, ok, S}; handle_call({test_calc_projection, KeepRunenvP}, _From, #ch_mgr{name=MyName}=S) -> RelativeToServer = MyName, @@ -226,10 +228,12 @@ code_change(_OldVsn, S, _Extra) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +finish_init(BestProj, #ch_mgr{active_p=false}=S) -> + _ = erlang:send_after(1000, self(), {finish_init, BestProj}), + S; finish_init(BestProj, #ch_mgr{init_finished=false, myflu=MyFLU} = S) -> case ?FLU_PC:read_latest_projection(MyFLU, private) of {error, not_written} -> - Epoch = BestProj#projection_v1.epoch_number, case ?FLU_PC:write_projection(MyFLU, private, BestProj) of ok -> S#ch_mgr{init_finished=true, proj=BestProj}; diff --git a/src/machi_projection.erl b/src/machi_projection.erl index 42bfc8a..b8779f2 100644 --- a/src/machi_projection.erl +++ b/src/machi_projection.erl @@ -29,44 +29,36 @@ update_projection_checksum/1, update_projection_dbg2/2, compare/2, - make_projection_summary/1 + make_projection_summary/1, + make_members_dict/1 ]). %% @doc Create a new projection record. -new(MyName, All_list, UPI_list, Down_list, Repairing_list, Ps) -> - new(0, MyName, All_list, Down_list, UPI_list, Repairing_list, Ps). +new(MyName, MemberDict, UPI_list, Down_list, Repairing_list, Ps) -> + new(0, MyName, MemberDict, Down_list, UPI_list, Repairing_list, Ps). %% @doc Create a new projection record. 
-new(EpochNum, MyName, All_list, Down_list, UPI_list, Repairing_list, Dbg) -> - new(EpochNum, MyName, All_list, Down_list, UPI_list, Repairing_list, +new(EpochNum, MyName, MemberDict, Down_list, UPI_list, Repairing_list, Dbg) -> + new(EpochNum, MyName, MemberDict, Down_list, UPI_list, Repairing_list, Dbg, []). %% @doc Create a new projection record. +%% +%% The `MemberDict0' argument may be a true `p_srvr_dict()' (i.e, it +%% is a well-formed `orddict' with the correct 2-tuple key-value form) +%% or it may be simply `list(p_srvr())', in which case we'll convert it +%% to a `p_srvr_dict()'. -new(EpochNum, MyName, All_list0, Down_list, UPI_list, Repairing_list, +new(EpochNum, MyName, MemberDict0, Down_list, UPI_list, Repairing_list, Dbg, Dbg2) when is_integer(EpochNum), EpochNum >= 0, is_atom(MyName) orelse is_binary(MyName), - is_list(All_list0), is_list(Down_list), is_list(UPI_list), + is_list(MemberDict0), is_list(Down_list), is_list(UPI_list), is_list(Repairing_list), is_list(Dbg), is_list(Dbg2) -> - {All_list, MemberDict} = - case lists:all(fun(P) when is_record(P, p_srvr) -> true; - (_) -> false - end, All_list0) of - true -> - All = [S#p_srvr.name || S <- All_list0], - TmpL = [{S#p_srvr.name, S} || S <- All_list0], - {All, orddict:from_list(TmpL)}; - false -> - All_list1 = lists:zip(All_list0,lists:seq(0,length(All_list0)-1)), - All_list2 = [#p_srvr{name=S, address="localhost", - port=?MACHI_DEFAULT_TCP_PORT+I} || - {S, I} <- All_list1], - TmpL = [{S#p_srvr.name, S} || S <- All_list2], - {All_list0, orddict:from_list(TmpL)} - end, + MembersDict = make_members_dict(MemberDict0), + All_list = [Name || {Name, _P} <- MembersDict], true = lists:all(fun(X) when is_atom(X) orelse is_binary(X) -> true; (_) -> false end, All_list), @@ -87,7 +79,7 @@ new(EpochNum, MyName, All_list0, Down_list, UPI_list, Repairing_list, creation_time=now(), author_server=MyName, all_members=All_list, - member_dict=MemberDict, + members_dict=MembersDict, down=Down_list, upi=UPI_list, repairing=Repairing_list, @@ -134,3 +126,31 @@ make_projection_summary(#projection_v1{epoch_number=EpochNum, [{epoch,EpochNum},{author,Author}, {upi,UPI_list},{repair,Repairing_list},{down,Down_list}, {d,Dbg}, {d2,Dbg2}]. + +%% @doc Make a `p_srvr_dict()' out of a list of `p_srvr()' or out of a +%% `p_srvr_dict()'. +%% +%% If `Ps' is a `p_srvr_dict()', then this function is usually a +%% no-op. However, if someone has tampered with the list and screwed +%% up its order, then we should fix it so `orddict' can work +%% correctly. +%% +%% If `Ps' is simply `list(p_srvr())', in which case we'll convert it +%% to a `p_srvr_dict()'. + +-spec make_members_dict(list(p_srvr()) | p_srvr_dict()) -> + p_srvr_dict(). +make_members_dict(Ps) -> + case lists:all(fun(P) when is_record(P, p_srvr) -> true; + (_) -> false + end, Ps) of + true -> + orddict:from_list([{P#p_srvr.name, P} || P <- Ps]); + false -> + case lists:all(fun({_K, P}) when is_record(P, p_srvr) -> true; + (_) -> false + end, Ps) of + true -> + orddict:from_list(Ps) + end % No false clause, crash it! + end. diff --git a/src/machi_proxy_flu1_client.erl b/src/machi_proxy_flu1_client.erl index f690c5c..ba3a3d2 100644 --- a/src/machi_proxy_flu1_client.erl +++ b/src/machi_proxy_flu1_client.erl @@ -307,104 +307,3 @@ disconnect(#state{sock=Sock, S#state{sock=undefined}; disconnect(S) -> S. - -%%%%%%%%%%%%%%%%%%%%%%%%%%% - --ifdef(TEST). 
- -dummy_server(Parent, TcpPort) -> - spawn_link(fun() -> - {ok, LSock} = gen_tcp:listen(TcpPort, - [{reuseaddr,true}, - {packet, line}, - {mode, binary}, - {active, false}]), - dummy_ack(Parent), - {ok, Sock} = gen_tcp:accept(LSock), - ok = inet:setopts(Sock, [{packet, line}]), - {ok, _Line} = gen_tcp:recv(Sock, 0), - ok = gen_tcp:send(Sock, "ERROR BADARG\n"), - (catch gen_tcp:close(Sock)), - unlink(Parent), - exit(normal) - end). - -dummy_ack(Parent) -> - Parent ! go. - -dummy_wait_for_ack() -> - receive go -> ok end. - -smoke_test() -> - TcpPort = 57123, - Me = self(), - _ServerPid = dummy_server(Me, TcpPort), - dummy_wait_for_ack(), - - I = #p_srvr{name=smoke, proto=ipv4, address="localhost", port=TcpPort}, - S0 = #state{i=I}, - false = connected_p(S0), - S1 = try_connect(S0), - true = connected_p(S1), - gen_tcp:send(S1#state.sock, "yo dawg\n"), - {ok, _Answer} = gen_tcp:recv(S1#state.sock, 0), - _S2 = disconnect(S1), - - ok. - -api_smoke_test() -> - RegName = api_smoke_flu, - Host = "localhost", - TcpPort = 57124, - DataDir = "./data.api_smoke_flu", - FLU1 = machi_flu1_test:setup_test_flu(RegName, TcpPort, DataDir), - erase(flu_pid), - - try - I = #p_srvr{name=RegName, proto=ipv4, address=Host, port=TcpPort}, - {ok, Prox1} = start_link(I), - try - FakeEpoch = {-1, <<0:(20*8)/big>>}, - [{ok, {_,_,_}} = append_chunk(Prox1, - FakeEpoch, <<"prefix">>, <<"data">>, - infinity) || _ <- lists:seq(1,5)], - %% Stop the FLU, what happens? - machi_flu1:stop(FLU1), - {error,_} = append_chunk(Prox1, - FakeEpoch, <<"prefix">>, <<"data">>, - infinity), - {error,not_connected} = append_chunk(Prox1, - FakeEpoch, <<"prefix">>, <<"data">>, - infinity), - %% Start the FLU again, we should be able to do stuff immediately - FLU1b = machi_flu1_test:setup_test_flu(RegName, TcpPort, DataDir, - [save_data_dir]), - put(flu_pid, FLU1b), - MyChunk = <<"my chunk data">>, - {ok, {MyOff,MySize,MyFile}} = - append_chunk(Prox1, FakeEpoch, <<"prefix">>, MyChunk, - infinity), - {ok, MyChunk} = read_chunk(Prox1, FakeEpoch, MyFile, MyOff, MySize), - - %% Alright, now for the rest of the API, whee - BadFile = <<"no-such-file">>, - {error, no_such_file} = checksum_list(Prox1, FakeEpoch, BadFile), - {ok, [_]} = list_files(Prox1, FakeEpoch), - {ok, FakeEpoch} = get_latest_epoch(Prox1, public), - {error, not_written} = read_latest_projection(Prox1, public), - {error, not_written} = read_projection(Prox1, public, 44), - P1 = machi_projection:new(1, a, [a], [], [a], [], []), - ok = write_projection(Prox1, public, P1), - {ok, P1} = read_projection(Prox1, public, 1), - {ok, [P1]} = get_all_projections(Prox1, public), - {ok, [1]} = list_all_projections(Prox1, public), - ok - after - _ = (catch quit(Prox1)) - end - after - (catch machi_flu1:stop(FLU1)), - (catch machi_flu1:stop(get(flu_pid))) - end. - --endif. % TEST diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 22a07db..598801c 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -135,15 +135,27 @@ chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> -ifndef(PULSE). smoke0_test() -> + %% TODO attack list: + %% 0. Add start option to chain manager to be "passive" only, i.e., + %% not immediately go to work on + %% 1. Start FLUs with full complement of FLU+proj+chmgr. + %% 2. Put each of them under a supervisor? + %% - Sup proc could be a created-specifically-for-test thing, perhaps? 
+ %% Rather than relying on a supervisor with reg name + OTP app started + %% plus plus more more yaddayadda? + %% 3. Add projection catalog/orddict of #p_srvr records?? + %% 4. Backport the changes to smoke0_test(). + %% 5. Do it to smoke1 test, yadda... {ok, _} = machi_partition_simulator:start_link({1,2,3}, 50, 50), Host = "localhost", TcpPort = 6623, {ok, FLUa} = machi_flu1:start_link([{a,TcpPort,"./data.a"}]), Pa = #p_srvr{name=a, proto=ipv4, address=Host, port=TcpPort}, + P_Srvr_Dict = machi_projection:make_members_dict([Pa]), %% Egadz, more racing on startup, yay. TODO fix. timer:sleep(1), {ok, FLUaP} = ?FLU_PC:start_link(Pa), - {ok, M0} = ?MGR:start_link(a, [a,b,c], FLUaP), + {ok, M0} = ?MGR:start_link(a, [a,b,c], P_Srvr_Dict, [{active_mode, false}]), _SockA = machi_util:connect(Host, TcpPort), try pong = ?MGR:ping(M0) diff --git a/test/machi_flu1_test.erl b/test/machi_flu1_test.erl index fbfc0ae..b4580e2 100644 --- a/test/machi_flu1_test.erl +++ b/test/machi_flu1_test.erl @@ -136,7 +136,8 @@ flu_projection_smoke_test() -> {ok, []} = ?FLU_C:list_all_projections(Host, TcpPort, T), {ok, []} = ?FLU_C:get_all_projections(Host, TcpPort, T), - P1 = machi_projection:new(1, a, [a], [], [a], [], []), + P_a = #p_srvr{name=a}, + P1 = machi_projection:new(1, a, [P_a], [], [a], [], []), ok = ?FLU_C:write_projection(Host, TcpPort, T, P1), {error, written} = ?FLU_C:write_projection(Host, TcpPort, T, P1), {ok, P1} = ?FLU_C:read_projection(Host, TcpPort, T, 1), diff --git a/test/machi_projection_test.erl b/test/machi_projection_test.erl index f30411a..3f2d59a 100644 --- a/test/machi_projection_test.erl +++ b/test/machi_projection_test.erl @@ -25,36 +25,45 @@ -include("machi_projection.hrl"). +new_fake(Name) -> + #p_srvr{name=Name}. + +%% Bleh, hey QuickCheck ... except that any model probably equals +%% code under test, bleh. + new_test() -> - %% Bleh, hey QuickCheck ... except that any model probably equals - %% code under test, bleh. 
- true = try_it(a, [a,b,c], [a,b], [], [c], []), - true = try_it(<<"a">>, [<<"a">>,b,c], [<<"a">>,b], [], [c], []), - Servers = [#p_srvr{name=a}, #p_srvr{name=b}, #p_srvr{name=c}], - Servers_bad1 = [#p_srvr{name= <<"a">>}, #p_srvr{name=b}, #p_srvr{name=c}], - Servers_bad2 = [#p_srvr{name=z}, #p_srvr{name=b}, #p_srvr{name=c}], + All0 = [new_fake(X) || X <- [a,b,c]], + All_binA = [new_fake(<<"a">>)] ++ [new_fake(X) || X <- [b,c]], + + true = try_it(a, All0, [a,b], [], [c], []), + true = try_it(<<"a">>, All_binA, [<<"a">>,b], [], [c], []), + Servers = All0, + Servers_bad1 = [new_fake(X) || X <- [<<"a">>,b,c]], + Servers_bad2 = [new_fake(X) || X <- [z,b,c]], true = try_it(a, Servers, [a,b], [], [c], []), false = try_it(a, not_list, [a,b], [], [c], []), - false = try_it(a, [a,b,c], not_list, [], [c], []), - false = try_it(a, [a,b,c], [a,b], not_list, [c], []), - false = try_it(a, [a,b,c], [a,b], [], not_list, []), - false = try_it(a, [a,b,c], [a,b], [], [c], not_list), + false = try_it(a, All0, not_list, [], [c], []), + false = try_it(a, All0, [a,b], not_list, [c], []), + false = try_it(a, All0, [a,b], [], not_list, []), + false = try_it(a, All0, [a,b], [], [c], not_list), - false = try_it(<<"x">>, [a,b,c], [a,b], [], [c], []), - false = try_it(a, [a,b,c], [a,b,c], [], [c], []), - false = try_it(a, [a,b,c], [a,b], [c], [c], []), - false = try_it(a, [a,b,c], [a,b], [], [c,c], []), + false = try_it(<<"x">>, All0, [a,b], [], [c], []), + false = try_it(a, All0, [a,b,c], [], [c], []), + false = try_it(a, All0, [a,b], [c], [c], []), + false = try_it(a, All0, [a,b], [], [c,c], []), false = try_it(a, Servers_bad1, [a,b], [], [c], []), false = try_it(a, Servers_bad2, [a,b], [], [c], []), ok. compare_test() -> - P0 = machi_projection:new(0, a, [a,b,c], [a,b], [], [c], []), - P1a = machi_projection:new(1, a, [a,b,c], [a,b], [], [c], []), - P1b = machi_projection:new(1, b, [a,b,c], [a,b], [], [c], []), - P2 = machi_projection:new(2, a, [a,b,c], [a,b], [], [c], []), + All0 = [new_fake(X) || X <- [a,b,c]], + + P0 = machi_projection:new(0, a, All0, [a,b], [], [c], []), + P1a = machi_projection:new(1, a, All0, [a,b], [], [c], []), + P1b = machi_projection:new(1, b, All0, [a,b], [], [c], []), + P2 = machi_projection:new(2, a, All0, [a,b], [], [c], []), 0 = machi_projection:compare(P0, P0), -1 = machi_projection:compare(P0, P1a), diff --git a/test/machi_proxy_flu1_client_test.erl b/test/machi_proxy_flu1_client_test.erl new file mode 100644 index 0000000..0e8974f --- /dev/null +++ b/test/machi_proxy_flu1_client_test.erl @@ -0,0 +1,125 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2007-2015 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-module(machi_proxy_flu1_client_test). + +-include("machi_projection.hrl"). + +-define(MUT, machi_proxy_flu1_client). + +-ifdef(TEST). 
+ +%% dummy_server(Parent, TcpPort) -> +%% spawn_link(fun() -> +%% {ok, LSock} = gen_tcp:listen(TcpPort, +%% [{reuseaddr,true}, +%% {packet, line}, +%% {mode, binary}, +%% {active, false}]), +%% dummy_ack(Parent), +%% {ok, Sock} = gen_tcp:accept(LSock), +%% ok = inet:setopts(Sock, [{packet, line}]), +%% {ok, _Line} = gen_tcp:recv(Sock, 0), +%% ok = gen_tcp:send(Sock, "ERROR BADARG\n"), +%% (catch gen_tcp:close(Sock)), +%% unlink(Parent), +%% exit(normal) +%% end). + +%% dummy_ack(Parent) -> +%% Parent ! go. + +%% dummy_wait_for_ack() -> +%% receive go -> ok end. + +%% smoke_test() -> +%% TcpPort = 57123, +%% Me = self(), +%% _ServerPid = dummy_server(Me, TcpPort), +%% dummy_wait_for_ack(), + +%% I = #p_srvr{name=smoke, proto=ipv4, address="localhost", port=TcpPort}, +%% S0 = #state{i=I}, +%% false = connected_p(S0), +%% S1 = try_connect(S0), +%% true = connected_p(S1), +%% gen_tcp:send(S1#state.sock, "yo dawg\n"), +%% {ok, _Answer} = gen_tcp:recv(S1#state.sock, 0), +%% _S2 = disconnect(S1), + +%% ok. + +api_smoke_test() -> + RegName = api_smoke_flu, + Host = "localhost", + TcpPort = 57124, + DataDir = "./data.api_smoke_flu", + FLU1 = machi_flu1_test:setup_test_flu(RegName, TcpPort, DataDir), + erase(flu_pid), + + try + I = #p_srvr{name=RegName, proto=ipv4, address=Host, port=TcpPort}, + {ok, Prox1} = ?MUT:start_link(I), + try + FakeEpoch = {-1, <<0:(20*8)/big>>}, + [{ok, {_,_,_}} = ?MUT:append_chunk(Prox1, + FakeEpoch, <<"prefix">>, <<"data">>, + infinity) || _ <- lists:seq(1,5)], + %% Stop the FLU, what happens? + machi_flu1:stop(FLU1), + {error,_} = ?MUT:append_chunk(Prox1, + FakeEpoch, <<"prefix">>, <<"data">>, + infinity), + {error,not_connected} = ?MUT:append_chunk(Prox1, + FakeEpoch, <<"prefix">>, <<"data">>, + infinity), + %% Start the FLU again, we should be able to do stuff immediately + FLU1b = machi_flu1_test:setup_test_flu(RegName, TcpPort, DataDir, + [save_data_dir]), + put(flu_pid, FLU1b), + MyChunk = <<"my chunk data">>, + {ok, {MyOff,MySize,MyFile}} = + ?MUT:append_chunk(Prox1, FakeEpoch, <<"prefix">>, MyChunk, + infinity), + {ok, MyChunk} = ?MUT:read_chunk(Prox1, FakeEpoch, MyFile, MyOff, MySize), + + %% Alright, now for the rest of the API, whee + BadFile = <<"no-such-file">>, + {error, no_such_file} = ?MUT:checksum_list(Prox1, FakeEpoch, BadFile), + {ok, [_]} = ?MUT:list_files(Prox1, FakeEpoch), + {ok, FakeEpoch} = ?MUT:get_latest_epoch(Prox1, public), + {error, not_written} = ?MUT:read_latest_projection(Prox1, public), + {error, not_written} = ?MUT:read_projection(Prox1, public, 44), + P_a = #p_srvr{name=a, address="localhost", port=6622}, + P1 = machi_projection:new(1, a, [P_a], [], [a], [], []), + ok = ?MUT:write_projection(Prox1, public, P1), + {ok, P1} = ?MUT:read_projection(Prox1, public, 1), + {ok, [P1]} = ?MUT:get_all_projections(Prox1, public), + {ok, [1]} = ?MUT:list_all_projections(Prox1, public), + ok + after + _ = (catch ?MUT:quit(Prox1)) + end + after + (catch machi_flu1:stop(FLU1)), + (catch machi_flu1:stop(get(flu_pid))) + end. + +-endif. 
% TEST From a92c5fec0a68ea068c2589496f038f40b3d605bb Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 12:21:15 +0900 Subject: [PATCH 11/22] WIP: remove old smoke0 code from machi_proxy_flu1_client_test.erl --- src/machi_projection.erl | 4 +++ test/machi_proxy_flu1_client_test.erl | 42 ++------------------------- 2 files changed, 6 insertions(+), 40 deletions(-) diff --git a/src/machi_projection.erl b/src/machi_projection.erl index b8779f2..6f7bd7f 100644 --- a/src/machi_projection.erl +++ b/src/machi_projection.erl @@ -102,6 +102,10 @@ update_projection_dbg2(P, Dbg2) when is_list(Dbg2) -> %% @doc Compare two projection records for equality (assuming that the %% checksum element has been correctly calculated). +%% +%% The name "compare" is probably too close to "rank"? This +%% comparison has nothing to do with projection ranking. +%% TODO: change the name of this function? -spec compare(#projection_v1{}, #projection_v1{}) -> integer(). diff --git a/test/machi_proxy_flu1_client_test.erl b/test/machi_proxy_flu1_client_test.erl index 0e8974f..a22456e 100644 --- a/test/machi_proxy_flu1_client_test.erl +++ b/test/machi_proxy_flu1_client_test.erl @@ -19,53 +19,15 @@ %% ------------------------------------------------------------------- -module(machi_proxy_flu1_client_test). +-compile(export_all). -include("machi_projection.hrl"). +-include_lib("eunit/include/eunit.hrl"). -define(MUT, machi_proxy_flu1_client). -ifdef(TEST). -%% dummy_server(Parent, TcpPort) -> -%% spawn_link(fun() -> -%% {ok, LSock} = gen_tcp:listen(TcpPort, -%% [{reuseaddr,true}, -%% {packet, line}, -%% {mode, binary}, -%% {active, false}]), -%% dummy_ack(Parent), -%% {ok, Sock} = gen_tcp:accept(LSock), -%% ok = inet:setopts(Sock, [{packet, line}]), -%% {ok, _Line} = gen_tcp:recv(Sock, 0), -%% ok = gen_tcp:send(Sock, "ERROR BADARG\n"), -%% (catch gen_tcp:close(Sock)), -%% unlink(Parent), -%% exit(normal) -%% end). - -%% dummy_ack(Parent) -> -%% Parent ! go. - -%% dummy_wait_for_ack() -> -%% receive go -> ok end. - -%% smoke_test() -> -%% TcpPort = 57123, -%% Me = self(), -%% _ServerPid = dummy_server(Me, TcpPort), -%% dummy_wait_for_ack(), - -%% I = #p_srvr{name=smoke, proto=ipv4, address="localhost", port=TcpPort}, -%% S0 = #state{i=I}, -%% false = connected_p(S0), -%% S1 = try_connect(S0), -%% true = connected_p(S1), -%% gen_tcp:send(S1#state.sock, "yo dawg\n"), -%% {ok, _Answer} = gen_tcp:recv(S1#state.sock, 0), -%% _S2 = disconnect(S1), - -%% ok. - api_smoke_test() -> RegName = api_smoke_flu, Host = "localhost", From 8deea3bb0149dc2f97eb7d303c186ad85d349647 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 14:44:58 +0900 Subject: [PATCH 12/22] WIP: smoke1 in chain manager works --- include/machi_chain_manager.hrl | 4 +- src/machi_chain_manager1.erl | 106 +++++++++++++---------------- test/machi_chain_manager1_test.erl | 29 ++++---- 3 files changed, 65 insertions(+), 74 deletions(-) diff --git a/include/machi_chain_manager.hrl b/include/machi_chain_manager.hrl index 006a4b5..ae8e25b 100644 --- a/include/machi_chain_manager.hrl +++ b/include/machi_chain_manager.hrl @@ -39,7 +39,5 @@ runenv :: list(), %proplist() opts :: list(), %proplist() members_dict :: p_srvr_dict(), - - %% Deprecated ... TODO: remove when old test unit test code is removed - proj_proposed :: 'none' | projection() + proxies_dict :: orddict:orddict(pv1_server(), pid()) }). 
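The include-file hunk above replaces the deprecated proj_proposed field with a proxies_dict that maps each chain member's name to a machi_proxy_flu1_client process. A minimal sketch of how that dictionary is built from members_dict and consulted later, using the illustrative helper name build_proxies/1 (an assumption for clarity; the patch itself does this inline in init/1 of machi_chain_manager1.erl in the next diff, and proxy_pid/2 there takes the #ch_mgr record rather than the bare dict):

    %% One proxy process per chain member, keyed by server name.
    build_proxies(MembersDict) ->
        orddict:fold(
          fun(Name, P, Acc) ->
                  {ok, Pid} = machi_proxy_flu1_client:start_link(P),
                  orddict:store(Name, Pid, Acc)
          end, orddict:new(), MembersDict).

    %% Later lookups use the name -> proxy pid mapping instead of the old
    %% single myflu pid.
    proxy_pid(Name, ProxiesDict) ->
        orddict:fetch(Name, ProxiesDict).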
diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index 83c8b21..ac1e470 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -59,6 +59,7 @@ -define(FLU_C, machi_flu1_client). -define(FLU_PC, machi_proxy_flu1_client). +-define(TO, (2*1000)). % default timeout %% Keep a history of our flowchart execution in the process dictionary. -define(REACT(T), put(react, [T|get(react)])). @@ -73,8 +74,7 @@ -ifdef(TEST). -export([test_calc_projection/2, - test_calc_proposed_projection/1, - test_write_proposed_projection/1, + test_write_public_projection/2, test_read_latest_public_projection/2, test_react_to_env/1, get_all_hosed/1]). @@ -90,11 +90,11 @@ -compile(export_all). -endif. %TEST -start_link(MyName, All_list, MyFLUPid) -> - start_link(MyName, All_list, MyFLUPid, []). +start_link(MyName, All_list, MembersDict) -> + start_link(MyName, All_list, MembersDict, []). -start_link(MyName, All_list, MyFLUPid, MgrOpts) -> - gen_server:start_link(?MODULE, {MyName, All_list, MyFLUPid, MgrOpts}, []). +start_link(MyName, All_list, MembersDict, MgrOpts) -> + gen_server:start_link(?MODULE, {MyName, All_list, MembersDict, MgrOpts}, []). stop(Pid) -> gen_server:call(Pid, {stop}, infinity). @@ -106,21 +106,15 @@ ping(Pid) -> %% Test/debugging code only. -test_write_proposed_projection(Pid) -> - gen_server:call(Pid, {test_write_proposed_projection}, infinity). +test_write_public_projection(Pid, Proj) -> + gen_server:call(Pid, {test_write_public_projection, Proj}, infinity). %% Calculate a projection and return it to us. %% If KeepRunenvP is true, the server will retain its change in its %% runtime environment, e.g., changes in simulated network partitions. -%% The server's internal proposed projection is not altered. test_calc_projection(Pid, KeepRunenvP) -> gen_server:call(Pid, {test_calc_projection, KeepRunenvP}, infinity). -%% Async! -%% The server's internal proposed projection *is* altered. -test_calc_proposed_projection(Pid) -> - gen_server:cast(Pid, {test_calc_proposed_projection}). - test_read_latest_public_projection(Pid, ReadRepairP) -> gen_server:call(Pid, {test_read_latest_public_projection, ReadRepairP}, infinity). @@ -143,6 +137,11 @@ init({MyName, All_list, MembersDict, MgrOpts}) -> [], []), NoneProj = make_initial_projection(MyName, All_list, [], [], []), + Proxies = orddict:fold( + fun(K, P, Acc) -> + {ok, Pid} = ?FLU_PC:start_link(P), + [{K, Pid}|Acc] + end, [], MembersDict), S = #ch_mgr{init_finished=false, active_p=proplists:get_value(active_mode, MgrOpts, true), name=MyName, @@ -150,6 +149,7 @@ init({MyName, All_list, MembersDict, MgrOpts}) -> proj_history=queue:new(), myflu=MyName, members_dict=MembersDict, + proxies_dict=orddict:from_list(Proxies), %% TODO 2015-03-04: revisit, should this constant be bigger? %% Yes, this should be bigger, but it's a hack. 
There is %% no guarantee that all parties will advance to a minimum @@ -176,15 +176,6 @@ handle_call({ping}, _From, S) -> {reply, pong, S}; handle_call({stop}, _From, S) -> {stop, normal, ok, S}; -handle_call(_Call, _From, #ch_mgr{init_finished=false} = S) -> - {reply, not_initialized, S}; -handle_call({test_write_proposed_projection}, _From, S) -> - if S#ch_mgr.proj_proposed == none -> - {reply, none, S}; - true -> - {Res, S2} = do_cl_write_proposed_proj(S), - {reply, Res, S2} - end; handle_call({test_calc_projection, KeepRunenvP}, _From, #ch_mgr{name=MyName}=S) -> RelativeToServer = MyName, @@ -192,11 +183,16 @@ handle_call({test_calc_projection, KeepRunenvP}, _From, {reply, {ok, P}, if KeepRunenvP -> S2; true -> S end}; +handle_call({test_write_public_projection, Proj}, _From, S) -> + {Res, S2} = do_cl_write_public_proj(Proj, S), + {reply, Res, S2}; handle_call({test_read_latest_public_projection, ReadRepairP}, _From, S) -> {Perhaps, Val, ExtraInfo, S2} = do_cl_read_latest_public_projection(ReadRepairP, S), Res = {Perhaps, Val, ExtraInfo}, {reply, Res, S2}; +handle_call(_Call, _From, #ch_mgr{init_finished=false} = S) -> + {reply, not_initialized, S}; handle_call({test_react_to_env}, _From, S) -> {TODOtodo, S2} = do_react_to_env(S), {reply, TODOtodo, S2}; @@ -205,10 +201,6 @@ handle_call(_Call, _From, S) -> handle_cast(_Cast, #ch_mgr{init_finished=false} = S) -> {noreply, S}; -handle_cast({test_calc_proposed_projection}, #ch_mgr{name=MyName}=S) -> - RelativeToServer = MyName, - {Proj, S2} = calc_projection(S, RelativeToServer), - {noreply, S2#ch_mgr{proj_proposed=Proj}}; handle_cast(_Cast, S) -> ?D({cast_whaaaaaaaaaaa, _Cast}), {noreply, S}. @@ -232,9 +224,10 @@ finish_init(BestProj, #ch_mgr{active_p=false}=S) -> _ = erlang:send_after(1000, self(), {finish_init, BestProj}), S; finish_init(BestProj, #ch_mgr{init_finished=false, myflu=MyFLU} = S) -> - case ?FLU_PC:read_latest_projection(MyFLU, private) of + MyFLUPid = proxy_pid(MyFLU, S), + case ?FLU_PC:read_latest_projection(MyFLUPid, private) of {error, not_written} -> - case ?FLU_PC:write_projection(MyFLU, private, BestProj) of + case ?FLU_PC:write_projection(MyFLUPid, private, BestProj) of ok -> S#ch_mgr{init_finished=true, proj=BestProj}; {error, not_written} -> @@ -251,14 +244,9 @@ finish_init(BestProj, #ch_mgr{init_finished=false, myflu=MyFLU} = S) -> exit({yo_weird, Else}) end. -do_cl_write_proposed_proj(#ch_mgr{proj_proposed=Proj} = S) -> +do_cl_write_public_proj(Proj, S) -> #projection_v1{epoch_number=Epoch} = Proj, - case cl_write_public_proj(Epoch, Proj, S) of - {ok, _S2}=Res -> - Res; - {_Other2, _S2}=Else2 -> - Else2 - end. + cl_write_public_proj(Epoch, Proj, S). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -278,7 +266,7 @@ cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, {_UpNodes, Partitions, S2} = calc_up_nodes(S), Res0 = perhaps_call_t( S, Partitions, MyFLU, - fun() -> machi_flu0:proj_write(MyFLU, Epoch, public, Proj) end), + fun(Pid) -> ?FLU_PC:write_projection(Pid, public, Proj, ?TO) end), Continue = fun() -> FLUs = Proj#projection_v1.all_members -- [MyFLU], cl_write_public_proj_remote(FLUs, Partitions, Epoch, Proj, S) @@ -294,11 +282,11 @@ cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, {Else, S2} end. -cl_write_public_proj_remote(FLUs, Partitions, Epoch, Proj, S) -> +cl_write_public_proj_remote(FLUs, Partitions, _Epoch, Proj, S) -> %% We're going to be very care-free about this write because we'll rely %% on the read side to do any read repair. 
- DoIt = fun(X) -> machi_flu0:proj_write(X, Epoch, public, Proj) end, - Rs = [{FLU, perhaps_call_t(S, Partitions, FLU, fun() -> DoIt(FLU) end)} || + DoIt = fun(Pid) -> ?FLU_PC:write_projection(Pid, public, Proj, ?TO) end, + Rs = [{FLU, perhaps_call_t(S, Partitions, FLU, fun(Pid) -> DoIt(Pid) end)} || FLU <- FLUs], {{remote_write_results, Rs}, S}. @@ -323,13 +311,13 @@ read_latest_projection_call_only(ProjectionType, AllHosed, All_queried_list = All_list -- AllHosed, {_UpNodes, Partitions, S2} = calc_up_nodes(S), - DoIt = fun(X) -> - case machi_flu0:proj_read_latest(X, ProjectionType) of + DoIt = fun(Pid) -> + case ?FLU_PC:read_latest_projection(Pid, ProjectionType, ?TO) of {ok, P} -> P; Else -> Else end end, - Rs = [perhaps_call_t(S, Partitions, FLU, fun() -> DoIt(FLU) end) || + Rs = [perhaps_call_t(S, Partitions, FLU, fun(Pid) -> DoIt(Pid) end) || FLU <- All_queried_list], FLUsRs = lists:zip(All_queried_list, Rs), {All_queried_list, FLUsRs, S2}. @@ -523,8 +511,8 @@ check_latest_private_projections(FLUs, MyProj, Partitions, S) -> FoldFun = fun(_FLU, false) -> false; (FLU, true) -> - F = fun() -> - machi_flu0:proj_read_latest(FLU, private) + F = fun(Pid) -> + ?FLU_PC:read_latest_projection(Pid, private, ?TO) end, case perhaps_call_t(S, Partitions, FLU, F) of {ok, RemotePrivateProj} -> @@ -1003,8 +991,8 @@ react_to_env_C110(P_latest, #ch_mgr{myflu=MyFLU} = S) -> Islands--Islands |Extra_todo]), - Epoch = P_latest2#projection_v1.epoch_number, - ok = machi_flu0:proj_write(MyFLU, Epoch, private, P_latest2), + MyFLUPid = proxy_pid(MyFLU, S), + ok = ?FLU_PC:write_projection(MyFLUPid, private, P_latest2, ?TO), case proplists:get_value(private_write_verbose, S#ch_mgr.opts) of true -> {_,_,C} = os:timestamp(), @@ -1039,7 +1027,7 @@ react_to_env_C120(P_latest, #ch_mgr{proj_history=H} = S) -> ?REACT({c120, [{latest, make_projection_summary(P_latest)}]}), {{now_using, P_latest#projection_v1.epoch_number}, - S#ch_mgr{proj=P_latest, proj_history=H3, proj_proposed=none}}. + S#ch_mgr{proj=P_latest, proj_history=H3}}. react_to_env_C200(Retries, P_latest, S) -> ?REACT(c200), @@ -1454,14 +1442,14 @@ projection_transition_is_sane( S1 = make_projection_summary(P1), S2 = make_projection_summary(P2), Trace = erlang:get_stacktrace(), - %% TODO: this history goop is useful sometimes for debugging but - %% not for any "real" use. Get rid of it, for the long term. - H = (catch [{FLUName, Type, P#projection_v1.epoch_number, make_projection_summary(P)} || - FLUName <- P1#projection_v1.all_members, - Type <- [public,private], - P <- machi_flu0:proj_get_all(FLUName, Type)]), + %% %% TODO: this history goop is useful sometimes for debugging but + %% %% not for any "real" use. Get rid of it, for the long term. + %% H = (catch [{FLUName, Type, P#projection_v1.epoch_number, make_projection_summary(P)} || + %% FLUName <- P1#projection_v1.all_members, + %% Type <- [public,private], + %% P <- ?FLU_PC:proj_get_all(orddict:fetch(FLUName, What?), Type)]), {err, _Type, _Err, from, S1, to, S2, relative_to, RelativeToServer, - history, (catch lists:sort(H)), + history, (catch lists:sort([no_history])), stack, Trace} end. @@ -1545,6 +1533,9 @@ merge_flap_counts([FlapCount|Rest], D1) -> end, D1, D2), merge_flap_counts(Rest, D3). +proxy_pid(Name, #ch_mgr{proxies_dict=ProxiesDict}) -> + orddict:fetch(Name, ProxiesDict). + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% perhaps_call_t(S, Partitions, FLU, DoIt) -> @@ -1555,11 +1546,12 @@ perhaps_call_t(S, Partitions, FLU, DoIt) -> t_timeout end. 
-perhaps_call(#ch_mgr{name=MyName, myflu=MyFLU}, Partitions, FLU, DoIt) -> +perhaps_call(#ch_mgr{name=MyName, myflu=MyFLU}=S, Partitions, FLU, DoIt) -> + ProxyPid = proxy_pid(FLU, S), RemoteFLU_p = FLU /= MyFLU, case RemoteFLU_p andalso lists:member({MyName, FLU}, Partitions) of false -> - Res = DoIt(), + Res = DoIt(ProxyPid), case RemoteFLU_p andalso lists:member({FLU, MyName}, Partitions) of false -> Res; diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 598801c..9a25e49 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -136,7 +136,7 @@ chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> smoke0_test() -> %% TODO attack list: - %% 0. Add start option to chain manager to be "passive" only, i.e., + %% __ Add start option to chain manager to be "passive" only, i.e., %% not immediately go to work on %% 1. Start FLUs with full complement of FLU+proj+chmgr. %% 2. Put each of them under a supervisor? @@ -166,28 +166,29 @@ smoke0_test() -> ok = machi_partition_simulator:stop() end. -smoke1_testTODO() -> +smoke1_test() -> machi_partition_simulator:start_link({1,2,3}, 100, 0), - {ok, FLUa} = machi_flu0:start_link(a), - {ok, FLUb} = machi_flu0:start_link(b), - {ok, FLUc} = machi_flu0:start_link(c), - I_represent = I_am = a, - {ok, M0} = ?MGR:start_link(I_represent, [a,b,c], I_am), + TcpPort = 62777, + FluInfo = [{a,TcpPort+0,"./data.a"}, {b,TcpPort+1,"./data.b"}, {c,TcpPort+2,"./data.c"}], + P_s = [#p_srvr{name=Name, address="localhost", port=Port} || + {Name,Port,_Dir} <- FluInfo], + + FLUs = [element(2, machi_flu1:start_link([{Name,Port,Dir}])) || + {Name,Port,Dir} <- FluInfo], + MembersDict = machi_projection:make_members_dict(P_s), + I_represent = a, + {ok, M0} = ?MGR:start_link(I_represent, [a,b,c], MembersDict, [{active_mode,false}]), try - {ok, _P1} = ?MGR:test_calc_projection(M0, false), - - _ = ?MGR:test_calc_proposed_projection(M0), + {ok, P1} = ?MGR:test_calc_projection(M0, false), {local_write_result, ok, {remote_write_results, [{b,ok},{c,ok}]}} = - ?MGR:test_write_proposed_projection(M0), + ?MGR:test_write_public_projection(M0, P1), {unanimous, P1, Extra1} = ?MGR:test_read_latest_public_projection(M0, false), ok after ok = ?MGR:stop(M0), - ok = machi_flu0:stop(FLUa), - ok = machi_flu0:stop(FLUb), - ok = machi_flu0:stop(FLUc), + [ok = machi_flu1:stop(X) || X <- FLUs], ok = machi_partition_simulator:stop() end. From e06adabb6ab18bf70bdd1697cb0edefdff1b53cd Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 17:13:38 +0900 Subject: [PATCH 13/22] WIP: bogus flapping in nonunanimous_setup_and_fix_test() --- include/machi_chain_manager.hrl | 8 +- src/machi_chain_manager1.erl | 248 +++++++++++------------------ src/machi_projection.erl | 53 +++--- test/machi_chain_manager1_test.erl | 96 ++++++----- 4 files changed, 185 insertions(+), 220 deletions(-) diff --git a/include/machi_chain_manager.hrl b/include/machi_chain_manager.hrl index ae8e25b..849dbdc 100644 --- a/include/machi_chain_manager.hrl +++ b/include/machi_chain_manager.hrl @@ -25,14 +25,12 @@ -type projection() :: #projection_v1{}. 
-record(ch_mgr, { - init_finished :: boolean(), - active_p :: boolean(), name :: pv1_server(), - proj :: projection(), - proj_history :: queue(), - myflu :: pid() | atom(), flap_limit :: non_neg_integer(), + proj :: projection(), %% + timer :: 'undefined' | reference(), + proj_history :: queue(), flaps=0 :: integer(), flap_start = ?NOT_FLAPPING :: erlang:now(), diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index ac1e470..50e3469 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -65,17 +65,18 @@ -define(REACT(T), put(react, [T|get(react)])). %% API --export([start_link/3, start_link/4, stop/1, ping/1]). +-export([start_link/2, start_link/3, stop/1, ping/1]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]). --export([make_projection_summary/1, projection_transitions_are_sane/2]). +-export([projection_transitions_are_sane/2]). -ifdef(TEST). -export([test_calc_projection/2, test_write_public_projection/2, test_read_latest_public_projection/2, + test_set_active/2, test_react_to_env/1, get_all_hosed/1]). @@ -90,11 +91,11 @@ -compile(export_all). -endif. %TEST -start_link(MyName, All_list, MembersDict) -> - start_link(MyName, All_list, MembersDict, []). +start_link(MyName, MembersDict) -> + start_link(MyName, MembersDict, []). -start_link(MyName, All_list, MembersDict, MgrOpts) -> - gen_server:start_link(?MODULE, {MyName, All_list, MembersDict, MgrOpts}, []). +start_link(MyName, MembersDict, MgrOpts) -> + gen_server:start_link(?MODULE, {MyName, MembersDict, MgrOpts}, []). stop(Pid) -> gen_server:call(Pid, {stop}, infinity). @@ -119,6 +120,9 @@ test_read_latest_public_projection(Pid, ReadRepairP) -> gen_server:call(Pid, {test_read_latest_public_projection, ReadRepairP}, infinity). +test_set_active(Pid, Boolean) when Boolean == true; Boolean == false -> + gen_server:call(Pid, {test_set_active, Boolean}, infinity). + test_react_to_env(Pid) -> gen_server:call(Pid, {test_react_to_env}, infinity). @@ -126,51 +130,43 @@ test_react_to_env(Pid) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -init({MyName, All_list, MembersDict, MgrOpts}) -> - RunEnv = [%% {seed, Seed}, - {seed, now()}, - {network_partitions, []}, - {network_islands, []}, - {flapping_i, []}, - {up_nodes, not_init_yet}], - BestProj = make_initial_projection(MyName, All_list, All_list, - [], []), - NoneProj = make_initial_projection(MyName, All_list, [], - [], []), +init({MyName, MembersDict, MgrOpts}) -> + All_list = [P#p_srvr.name || {_, P} <- orddict:to_list(MembersDict)], + Opt = fun(Key, Default) -> proplists:get_value(Key, MgrOpts, Default) end, + RunEnv = [{seed, Opt(seed, now())}, + {network_partitions, Opt(network_partitions, [])}, + {network_islands, Opt(network_islands, [])}, + {flapping_i, Opt(flapping, [])}, + {up_nodes, Opt(up_nodes, not_init_yet)}], + ActiveP = Opt(active_mode, true), + Down_list = All_list -- [MyName], + UPI_list = [MyName], + NoneProj = machi_projection:new(MyName, MembersDict, + Down_list, UPI_list, [], []), Proxies = orddict:fold( fun(K, P, Acc) -> {ok, Pid} = ?FLU_PC:start_link(P), [{K, Pid}|Acc] end, [], MembersDict), - S = #ch_mgr{init_finished=false, - active_p=proplists:get_value(active_mode, MgrOpts, true), - name=MyName, - proj=NoneProj, - proj_history=queue:new(), - myflu=MyName, - members_dict=MembersDict, - proxies_dict=orddict:from_list(Proxies), + S = #ch_mgr{name=MyName, %% TODO 2015-03-04: revisit, should this constant be bigger? %% Yes, this should be bigger, but it's a hack. 
There is %% no guarantee that all parties will advance to a minimum %% flap awareness in the amount of time that this mgr will. flap_limit=length(All_list) + 50, + proj=NoneProj, + timer='undefined', + proj_history=queue:new(), runenv=RunEnv, - opts=MgrOpts}, - - %% TODO: There is a bootstrapping problem there that needs to be - %% solved eventually: someone/something needs to set the initial - %% state for the chain. - %% - %% The PoC hack here will set the chain to all members. That may - %% be fine for testing purposes, but it won't work for real life. - %% For example, if chain C has been running with [a,b] for a - %% while, then we start c. We don't want c to immediately say, - %% hey, let's do [a,b,c] immediately ... UPI invariant requires - %% repair, etc. etc. - - self() ! {finish_init, BestProj}, - {ok, S}. + opts=MgrOpts, + members_dict=MembersDict, + proxies_dict=orddict:from_list(Proxies)}, + S2 = if ActiveP == false -> + S; + ActiveP == true -> + set_active_timer(S) + end, + {ok, S2}. handle_call({ping}, _From, S) -> {reply, pong, S}; @@ -191,23 +187,27 @@ handle_call({test_read_latest_public_projection, ReadRepairP}, _From, S) -> do_cl_read_latest_public_projection(ReadRepairP, S), Res = {Perhaps, Val, ExtraInfo}, {reply, Res, S2}; -handle_call(_Call, _From, #ch_mgr{init_finished=false} = S) -> - {reply, not_initialized, S}; +handle_call({test_set_active, Boolean}, _From, #ch_mgr{timer=TRef}=S) -> + case {Boolean, TRef} of + {true, undefined} -> + S2 = set_active_timer(S), + {reply, ok, S2}; + {false, TRef} when is_reference(TRef) -> + timer:cancel(TRef), + {reply, ok, S#ch_mgr{timer=undefined}}; + _ -> + {reply, error, S} + end; handle_call({test_react_to_env}, _From, S) -> {TODOtodo, S2} = do_react_to_env(S), {reply, TODOtodo, S2}; handle_call(_Call, _From, S) -> {reply, whaaaaaaaaaa, S}. -handle_cast(_Cast, #ch_mgr{init_finished=false} = S) -> - {noreply, S}; handle_cast(_Cast, S) -> ?D({cast_whaaaaaaaaaaa, _Cast}), {noreply, S}. -handle_info({finish_init, BestProj}, S) -> - S2 = finish_init(BestProj, S), - {noreply, S2}; handle_info(Msg, S) -> exit({bummer, Msg}), {noreply, S}. @@ -220,36 +220,16 @@ code_change(_OldVsn, S, _Extra) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -finish_init(BestProj, #ch_mgr{active_p=false}=S) -> - _ = erlang:send_after(1000, self(), {finish_init, BestProj}), - S; -finish_init(BestProj, #ch_mgr{init_finished=false, myflu=MyFLU} = S) -> - MyFLUPid = proxy_pid(MyFLU, S), - case ?FLU_PC:read_latest_projection(MyFLUPid, private) of - {error, not_written} -> - case ?FLU_PC:write_projection(MyFLUPid, private, BestProj) of - ok -> - S#ch_mgr{init_finished=true, proj=BestProj}; - {error, not_written} -> - exit({yo_impossible, ?LINE}); - Else -> - ?D({retry,Else}), - timer:sleep(100), - finish_init(BestProj, S) - end; - {ok, Proj} -> - S#ch_mgr{init_finished=true, proj=Proj}; - Else -> - ?D({todo, fix_up_eventually, Else}), - exit({yo_weird, Else}) - end. +set_active_timer(#ch_mgr{name=MyName, members_dict=MembersDict}=S) -> + FLU_list = [P#p_srvr.name || P <- MembersDict], + USec = calc_sleep_ranked_order(1000, 2000, MyName, FLU_list), + {ok, TRef} = timer:send_interval(USec), + S#ch_mgr{timer=TRef}. do_cl_write_public_proj(Proj, S) -> #projection_v1{epoch_number=Epoch} = Proj, cl_write_public_proj(Epoch, Proj, S). -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - cl_write_public_proj(Epoch, Proj, S) -> cl_write_public_proj(Epoch, Proj, false, S). 
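The hunks in this patch change the calling convention of perhaps_call_t/4: the fun it is given now receives the proxy pid of the target FLU (looked up via proxy_pid/2), and every proxy call carries the new ?TO timeout. A minimal usage sketch, assuming the hypothetical wrapper name write_one_public/4 (not a function in the patch):

    %% Write a public projection to one FLU through its proxy, honoring any
    %% simulated partitions tracked in Partitions.
    write_one_public(Proj, FLU, Partitions, S) ->
        perhaps_call_t(S, Partitions, FLU,
                       fun(ProxyPid) ->
                           machi_proxy_flu1_client:write_projection(
                             ProxyPid, public, Proj, 2*1000)
                       end).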
@@ -262,13 +242,13 @@ cl_write_public_proj(Epoch, Proj, SkipLocalWriteErrorP, S) -> cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, S). cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, - #ch_mgr{myflu=MyFLU}=S) -> + #ch_mgr{name=MyName}=S) -> {_UpNodes, Partitions, S2} = calc_up_nodes(S), Res0 = perhaps_call_t( - S, Partitions, MyFLU, + S, Partitions, MyName, fun(Pid) -> ?FLU_PC:write_projection(Pid, public, Proj, ?TO) end), Continue = fun() -> - FLUs = Proj#projection_v1.all_members -- [MyFLU], + FLUs = Proj#projection_v1.all_members -- [MyName], cl_write_public_proj_remote(FLUs, Partitions, Epoch, Proj, S) end, case Res0 of @@ -291,7 +271,7 @@ cl_write_public_proj_remote(FLUs, Partitions, _Epoch, Proj, S) -> {{remote_write_results, Rs}, S}. do_cl_read_latest_public_projection(ReadRepairP, - #ch_mgr{proj=Proj1, myflu=_MyFLU} = S) -> + #ch_mgr{proj=Proj1} = S) -> _Epoch1 = Proj1#projection_v1.epoch_number, case cl_read_latest_projection(public, S) of {needs_repair, FLUsRs, Extra, S3} -> @@ -389,40 +369,6 @@ do_read_repair(FLUsRs, _Extra, #ch_mgr{proj=CurrentProj} = S) -> Res end. -make_initial_projection(MyName, All_list, UPI_list, Repairing_list, Ps) -> - make_projection(0, MyName, All_list, [], UPI_list, Repairing_list, Ps). - -make_projection(EpochNum, - MyName, All_list, Down_list, UPI_list, Repairing_list, - Dbg) -> - make_projection(EpochNum, - MyName, All_list, Down_list, UPI_list, Repairing_list, - Dbg, []). - -make_projection(EpochNum, - MyName, All_list, Down_list, UPI_list, Repairing_list, - Dbg, Dbg2) -> - P = #projection_v1{epoch_number=EpochNum, - epoch_csum= <<>>, % always checksums as <<>> - creation_time=now(), - author_server=MyName, - all_members=All_list, - down=Down_list, - upi=UPI_list, - repairing=Repairing_list, - dbg=Dbg, - dbg2=[] % always checksums as [] - }, - P2 = update_projection_checksum(P), - P2#projection_v1{dbg2=Dbg2}. - -update_projection_checksum(#projection_v1{dbg2=Dbg2} = P) -> - CSum = crypto:hash(sha, term_to_binary(P#projection_v1{dbg2=[]})), - P#projection_v1{epoch_csum=CSum, dbg2=Dbg2}. - -update_projection_dbg2(P, Dbg2) when is_list(Dbg2) -> - P#projection_v1{dbg2=Dbg2}. - calc_projection(S, RelativeToServer) -> calc_projection(S, RelativeToServer, []). @@ -442,10 +388,10 @@ calc_projection(_OldThreshold, _NoPartitionThreshold, LastProj, RelativeToServer, AllHosed, Dbg, #ch_mgr{name=MyName, runenv=RunEnv1}=S) -> #projection_v1{epoch_number=OldEpochNum, - all_members=All_list, - upi=OldUPI_list, - repairing=OldRepairing_list - } = LastProj, + members_dict=MembersDict, + upi=OldUPI_list, + repairing=OldRepairing_list + } = LastProj, LastUp = lists:usort(OldUPI_list ++ OldRepairing_list), AllMembers = (S#ch_mgr.proj)#projection_v1.all_members, {Up0, Partitions, RunEnv2} = calc_up_nodes(MyName, @@ -501,10 +447,10 @@ D_foo=[], {TentativeUPI, TentativeRepairing} end, - P = make_projection(OldEpochNum + 1, - MyName, All_list, Down, NewUPI, NewRepairing, - D_foo ++ - Dbg ++ [{ps, Partitions},{nodes_up, Up}]), + P = machi_projection:new(OldEpochNum + 1, + MyName, MembersDict, Down, NewUPI, NewRepairing, + D_foo ++ + Dbg ++ [{ps, Partitions},{nodes_up, Up}]), {P, S#ch_mgr{runenv=RunEnv3}}. check_latest_private_projections(FLUs, MyProj, Partitions, S) -> @@ -562,17 +508,8 @@ calc_up_nodes(MyName, AllMembers, RunEnv1) -> replace(PropList, Items) -> proplists:compact(Items ++ PropList). 
-make_projection_summary(#projection_v1{epoch_number=EpochNum, - all_members=_All_list, - down=Down_list, - author_server=Author, - upi=UPI_list, - repairing=Repairing_list, - dbg=Dbg, dbg2=Dbg2}) -> - [{epoch,EpochNum},{author,Author}, - {upi,UPI_list},{repair,Repairing_list},{down,Down_list}, - {d,Dbg}, {d2,Dbg2}]. - +rank_and_sort_projections([], CurrentProj) -> + rank_projections([CurrentProj], CurrentProj); rank_and_sort_projections(Ps, CurrentProj) -> Epoch = lists:max([Proj#projection_v1.epoch_number || Proj <- Ps]), MaxPs = [Proj || Proj <- Ps, @@ -595,8 +532,8 @@ rank_projections(Projs, CurrentProj) -> rank_projection(#projection_v1{upi=[]}, _MemberRank, _N) -> -100; rank_projection(#projection_v1{author_server=Author, - upi=UPI_list, - repairing=Repairing_list}, MemberRank, N) -> + upi=UPI_list, + repairing=Repairing_list}, MemberRank, N) -> AuthorRank = orddict:fetch(Author, MemberRank), %% (AuthorRank-AuthorRank) + % feels unstable???? AuthorRank + % feels stable @@ -649,7 +586,7 @@ react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, ?REACT(a30), RelativeToServer = MyName, {P_newprop1, S2} = calc_projection(S, RelativeToServer), - ?REACT({a30, ?LINE, [{newprop1, make_projection_summary(P_newprop1)}]}), + ?REACT({a30, ?LINE, [{newprop1, machi_projection:make_summary(P_newprop1)}]}), %% Are we flapping yet? {P_newprop2, S3} = calculate_flaps(P_newprop1, P_current, FlapLimit, S2), @@ -659,7 +596,7 @@ react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, #projection_v1{epoch_number=Epoch_latest}=P_latest, NewEpoch = erlang:max(Epoch_newprop2, Epoch_latest) + 1, P_newprop3 = P_newprop2#projection_v1{epoch_number=NewEpoch}, - ?REACT({a30, ?LINE, [{newprop3, make_projection_summary(P_newprop3)}]}), + ?REACT({a30, ?LINE, [{newprop3, machi_projection:make_summary(P_newprop3)}]}), {P_newprop10, S10} = case get_flap_count(P_newprop3) of @@ -708,7 +645,7 @@ react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, end, P_inner2 = P_inner#projection_v1{epoch_number=FinalInnerEpoch}, - InnerInfo = [{inner_summary, make_projection_summary(P_inner2)}, + InnerInfo = [{inner_summary, machi_projection:make_summary(P_inner2)}, {inner_projection, P_inner2}], DbgX = replace(P_newprop3#projection_v1.dbg, InnerInfo), ?REACT({a30, ?LINE, [qqqwww|DbgX]}), @@ -977,22 +914,22 @@ react_to_env_C100(P_newprop, P_latest, react_to_env_C300(P_newprop, P_latest, S) end. -react_to_env_C110(P_latest, #ch_mgr{myflu=MyFLU} = S) -> +react_to_env_C110(P_latest, #ch_mgr{name=MyName} = S) -> ?REACT(c110), %% TOOD: Should we carry along any extra info that that would be useful %% in the dbg2 list? 
Extra_todo = [], RunEnv = S#ch_mgr.runenv, Islands = proplists:get_value(network_islands, RunEnv), - P_latest2 = update_projection_dbg2( + P_latest2 = machi_projection:update_dbg2( P_latest, [%% {network_islands, Islands}, %% {hooray, {v2, date(), time()}} Islands--Islands |Extra_todo]), - MyFLUPid = proxy_pid(MyFLU, S), - ok = ?FLU_PC:write_projection(MyFLUPid, private, P_latest2, ?TO), + MyNamePid = proxy_pid(MyName, S), + ok = ?FLU_PC:write_projection(MyNamePid, private, P_latest2, ?TO), case proplists:get_value(private_write_verbose, S#ch_mgr.opts) of true -> {_,_,C} = os:timestamp(), @@ -1000,7 +937,7 @@ react_to_env_C110(P_latest, #ch_mgr{myflu=MyFLU} = S) -> {HH,MM,SS} = time(), io:format(user, "\n~2..0w:~2..0w:~2..0w.~3..0w ~p uses: ~w\n", [HH,MM,SS,MSec, S#ch_mgr.name, - make_projection_summary(P_latest2)]); + machi_projection:make_summary(P_latest2)]); _ -> ok end, @@ -1025,7 +962,7 @@ react_to_env_C120(P_latest, #ch_mgr{proj_history=H} = S) -> io:format(user, "HEE120s ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse([X || X <- HH, is_atom(X)])]), %% io:format(user, "HEE120 ~w ~w ~p\n", [S#ch_mgr.name, self(), lists:reverse(HH)]), - ?REACT({c120, [{latest, make_projection_summary(P_latest)}]}), + ?REACT({c120, [{latest, machi_projection:make_summary(P_latest)}]}), {{now_using, P_latest#projection_v1.epoch_number}, S#ch_mgr{proj=P_latest, proj_history=H3}}. @@ -1059,16 +996,16 @@ react_to_env_C300(#projection_v1{epoch_number=_Epoch_newprop}=P_newprop, %% This logic moved to A30. %% NewEpoch = erlang:max(Epoch_newprop, Epoch_latest) + 1, %% P_newprop2 = P_newprop#projection_v1{epoch_number=NewEpoch}, - %% react_to_env_C310(update_projection_checksum(P_newprop2), S). + %% react_to_env_C310(update_checksum(P_newprop2), S). - react_to_env_C310(update_projection_checksum(P_newprop), S). + react_to_env_C310(machi_projection:update_checksum(P_newprop), S). react_to_env_C310(P_newprop, S) -> ?REACT(c310), Epoch = P_newprop#projection_v1.epoch_number, {WriteRes, S2} = cl_write_public_proj_skip_local_error(Epoch, P_newprop, S), ?REACT({c310, ?LINE, - [{newprop, make_projection_summary(P_newprop)}, + [{newprop, machi_projection:make_summary(P_newprop)}, {write_result, WriteRes}]}), react_to_env_A10(S2). @@ -1200,7 +1137,7 @@ calculate_flaps(P_newprop, _P_current, FlapLimit, %% flaps each time ... but the C2xx path doesn't write a new %% proposal to everyone's public proj stores, and there's no %% guarantee that anyone else as written a new public proj either. - {update_projection_checksum(P_newprop#projection_v1{dbg=Dbg2}), + {machi_projection:update_checksum(P_newprop#projection_v1{dbg=Dbg2}), S#ch_mgr{flaps=NewFlaps, flap_start=NewFlapStart, runenv=RunEnv2}}. projection_transitions_are_sane(Ps, RelativeToServer) -> @@ -1439,12 +1376,12 @@ projection_transition_is_sane( true catch _Type:_Err -> - S1 = make_projection_summary(P1), - S2 = make_projection_summary(P2), + S1 = machi_projection:make_summary(P1), + S2 = machi_projection:make_summary(P2), Trace = erlang:get_stacktrace(), %% %% TODO: this history goop is useful sometimes for debugging but %% %% not for any "real" use. Get rid of it, for the long term. 
- %% H = (catch [{FLUName, Type, P#projection_v1.epoch_number, make_projection_summary(P)} || + %% H = (catch [{FLUName, Type, P#projection_v1.epoch_number, machi_projection:make_summary(P)} || %% FLUName <- P1#projection_v1.all_members, %% Type <- [public,private], %% P <- ?FLU_PC:proj_get_all(orddict:fetch(FLUName, What?), Type)]), @@ -1463,14 +1400,17 @@ find_common_prefix(_, _) -> []. sleep_ranked_order(MinSleep, MaxSleep, FLU, FLU_list) -> + USec = calc_sleep_ranked_order(MinSleep, MaxSleep, FLU, FLU_list), + timer:sleep(USec), + USec. + +calc_sleep_ranked_order(MinSleep, MaxSleep, FLU, FLU_list) -> Front = lists:takewhile(fun(X) -> X /=FLU end, FLU_list), Index = length(Front) + 1, NumNodes = length(FLU_list), SleepIndex = NumNodes - Index, SleepChunk = MaxSleep div NumNodes, - SleepTime = MinSleep + (SleepChunk * SleepIndex), - timer:sleep(SleepTime), - SleepTime. + MinSleep + (SleepChunk * SleepIndex). my_find_minmost([]) -> 0; @@ -1546,9 +1486,9 @@ perhaps_call_t(S, Partitions, FLU, DoIt) -> t_timeout end. -perhaps_call(#ch_mgr{name=MyName, myflu=MyFLU}=S, Partitions, FLU, DoIt) -> +perhaps_call(#ch_mgr{name=MyName}=S, Partitions, FLU, DoIt) -> ProxyPid = proxy_pid(FLU, S), - RemoteFLU_p = FLU /= MyFLU, + RemoteFLU_p = FLU /= MyName, case RemoteFLU_p andalso lists:member({MyName, FLU}, Partitions) of false -> Res = DoIt(ProxyPid), @@ -1556,11 +1496,11 @@ perhaps_call(#ch_mgr{name=MyName, myflu=MyFLU}=S, Partitions, FLU, DoIt) -> false -> Res; _ -> - (catch put(react, [{timeout2,me,MyFLU,to,FLU,RemoteFLU_p,Partitions}|get(react)])), + (catch put(react, [{timeout2,me,MyName,to,FLU,RemoteFLU_p,Partitions}|get(react)])), exit(timeout) end; _ -> - (catch put(react, [{timeout1,me,MyFLU,to,FLU,RemoteFLU_p,Partitions}|get(react)])), + (catch put(react, [{timeout1,me,MyName,to,FLU,RemoteFLU_p,Partitions}|get(react)])), exit(timeout) end. diff --git a/src/machi_projection.erl b/src/machi_projection.erl index 6f7bd7f..5f97f94 100644 --- a/src/machi_projection.erl +++ b/src/machi_projection.erl @@ -26,10 +26,10 @@ -export([ new/6, new/7, new/8, - update_projection_checksum/1, - update_projection_dbg2/2, + update_checksum/1, + update_dbg2/2, compare/2, - make_projection_summary/1, + make_summary/1, make_members_dict/1 ]). @@ -51,13 +51,13 @@ new(EpochNum, MyName, MemberDict, Down_list, UPI_list, Repairing_list, Dbg) -> %% or it may be simply `list(p_srvr())', in which case we'll convert it %% to a `p_srvr_dict()'. -new(EpochNum, MyName, MemberDict0, Down_list, UPI_list, Repairing_list, +new(EpochNum, MyName, MembersDict0, Down_list, UPI_list, Repairing_list, Dbg, Dbg2) when is_integer(EpochNum), EpochNum >= 0, is_atom(MyName) orelse is_binary(MyName), - is_list(MemberDict0), is_list(Down_list), is_list(UPI_list), + is_list(MembersDict0), is_list(Down_list), is_list(UPI_list), is_list(Repairing_list), is_list(Dbg), is_list(Dbg2) -> - MembersDict = make_members_dict(MemberDict0), + MembersDict = make_members_dict(MembersDict0), All_list = [Name || {Name, _P} <- MembersDict], true = lists:all(fun(X) when is_atom(X) orelse is_binary(X) -> true; (_) -> false @@ -85,11 +85,11 @@ new(EpochNum, MyName, MemberDict0, Down_list, UPI_list, Repairing_list, repairing=Repairing_list, dbg=Dbg }, - update_projection_dbg2(update_projection_checksum(P), Dbg2). + update_dbg2(update_checksum(P), Dbg2). %% @doc Update the checksum element of a projection record. 
-update_projection_checksum(P) -> +update_checksum(P) -> CSum = crypto:hash(sha, term_to_binary(P#projection_v1{epoch_csum= <<>>, dbg2=[]})), @@ -97,7 +97,7 @@ update_projection_checksum(P) -> %% @doc Update the `dbg2' element of a projection record. -update_projection_dbg2(P, Dbg2) when is_list(Dbg2) -> +update_dbg2(P, Dbg2) when is_list(Dbg2) -> P#projection_v1{dbg2=Dbg2}. %% @doc Compare two projection records for equality (assuming that the @@ -120,13 +120,13 @@ compare(#projection_v1{epoch_number=E1}, %% @doc Create a proplist-style summary of a projection record. -make_projection_summary(#projection_v1{epoch_number=EpochNum, - all_members=_All_list, - down=Down_list, - author_server=Author, - upi=UPI_list, - repairing=Repairing_list, - dbg=Dbg, dbg2=Dbg2}) -> +make_summary(#projection_v1{epoch_number=EpochNum, + all_members=_All_list, + down=Down_list, + author_server=Author, + upi=UPI_list, + repairing=Repairing_list, + dbg=Dbg, dbg2=Dbg2}) -> [{epoch,EpochNum},{author,Author}, {upi,UPI_list},{repair,Repairing_list},{down,Down_list}, {d,Dbg}, {d2,Dbg2}]. @@ -145,16 +145,21 @@ make_projection_summary(#projection_v1{epoch_number=EpochNum, -spec make_members_dict(list(p_srvr()) | p_srvr_dict()) -> p_srvr_dict(). make_members_dict(Ps) -> - case lists:all(fun(P) when is_record(P, p_srvr) -> true; - (_) -> false - end, Ps) of + F_rec = fun(P) when is_record(P, p_srvr) -> true; + (_) -> false + end, + F_tup = fun({_K, P}) when is_record(P, p_srvr) -> true; + (_) -> false + end, + case lists:all(F_rec, Ps) of true -> orddict:from_list([{P#p_srvr.name, P} || P <- Ps]); false -> - case lists:all(fun({_K, P}) when is_record(P, p_srvr) -> true; - (_) -> false - end, Ps) of + case lists:all(F_tup, Ps) of true -> - orddict:from_list(Ps) - end % No false clause, crash it! + orddict:from_list(Ps); + false -> + F_neither = fun(X) -> not (F_rec(X) or F_tup(X)) end, + exit({badarg, {make_members_dict, lists:filter(F_neither, Ps)}}) + end end. diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 9a25e49..20cf93c 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -135,27 +135,16 @@ chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> -ifndef(PULSE). smoke0_test() -> - %% TODO attack list: - %% __ Add start option to chain manager to be "passive" only, i.e., - %% not immediately go to work on - %% 1. Start FLUs with full complement of FLU+proj+chmgr. - %% 2. Put each of them under a supervisor? - %% - Sup proc could be a created-specifically-for-test thing, perhaps? - %% Rather than relying on a supervisor with reg name + OTP app started - %% plus plus more more yaddayadda? - %% 3. Add projection catalog/orddict of #p_srvr records?? - %% 4. Backport the changes to smoke0_test(). - %% 5. Do it to smoke1 test, yadda... {ok, _} = machi_partition_simulator:start_link({1,2,3}, 50, 50), Host = "localhost", TcpPort = 6623, {ok, FLUa} = machi_flu1:start_link([{a,TcpPort,"./data.a"}]), Pa = #p_srvr{name=a, proto=ipv4, address=Host, port=TcpPort}, - P_Srvr_Dict = machi_projection:make_members_dict([Pa]), + Members_Dict = machi_projection:make_members_dict([Pa]), %% Egadz, more racing on startup, yay. TODO fix. 
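    %% A minimal sketch of the make_members_dict/1 contract shown above: it
    %% accepts either a list of bare #p_srvr{} records or a list of
    %% {Name, #p_srvr{}} pairs, and anything else now exits with {badarg, ...}.
    %% Both input forms yield the same orddict, so this match also holds:
    Members_Dict = machi_projection:make_members_dict([{a, Pa}]),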
timer:sleep(1), {ok, FLUaP} = ?FLU_PC:start_link(Pa), - {ok, M0} = ?MGR:start_link(a, [a,b,c], P_Srvr_Dict, [{active_mode, false}]), + {ok, M0} = ?MGR:start_link(a, Members_Dict, [{active_mode, false}]), _SockA = machi_util:connect(Host, TcpPort), try pong = ?MGR:ping(M0) @@ -172,12 +161,12 @@ smoke1_test() -> FluInfo = [{a,TcpPort+0,"./data.a"}, {b,TcpPort+1,"./data.b"}, {c,TcpPort+2,"./data.c"}], P_s = [#p_srvr{name=Name, address="localhost", port=Port} || {Name,Port,_Dir} <- FluInfo], - + + [machi_flu1_test:clean_up_data_dir(Dir) || {_,_,Dir} <- FluInfo], FLUs = [element(2, machi_flu1:start_link([{Name,Port,Dir}])) || {Name,Port,Dir} <- FluInfo], MembersDict = machi_projection:make_members_dict(P_s), - I_represent = a, - {ok, M0} = ?MGR:start_link(I_represent, [a,b,c], MembersDict, [{active_mode,false}]), + {ok, M0} = ?MGR:start_link(a, MembersDict, [{active_mode,false}]), try {ok, P1} = ?MGR:test_calc_projection(M0, false), {local_write_result, ok, @@ -192,24 +181,44 @@ smoke1_test() -> ok = machi_partition_simulator:stop() end. -nonunanimous_setup_and_fix_testTODO() -> +nonunanimous_setup_and_fix_test() -> + %% TODO attack list: + %% __ Add start option to chain manager to be "passive" only, i.e., + %% not immediately go to work on + %% 1. Start FLUs with full complement of FLU+proj+chmgr. + %% 2. Put each of them under a supervisor? + %% - Sup proc could be a created-specifically-for-test thing, perhaps? + %% Rather than relying on a supervisor with reg name + OTP app started + %% plus plus more more yaddayadda? + %% 3. Add projection catalog/orddict of #p_srvr records?? + %% 4. Fix this test, etc etc. machi_partition_simulator:start_link({1,2,3}, 100, 0), - {ok, FLUa} = machi_flu0:start_link(a), - {ok, FLUb} = machi_flu0:start_link(b), - I_represent = I_am = a, - {ok, Ma} = ?MGR:start_link(I_represent, [a,b], I_am), - {ok, Mb} = ?MGR:start_link(b, [a,b], b), + TcpPort = 62877, + FluInfo = [{a,TcpPort+0,"./data.a"}, {b,TcpPort+1,"./data.b"}], + P_s = [#p_srvr{name=Name, address="localhost", port=Port} || + {Name,Port,_Dir} <- FluInfo], + + [machi_flu1_test:clean_up_data_dir(Dir) || {_,_,Dir} <- FluInfo], + FLUs = [element(2, machi_flu1:start_link([{Name,Port,Dir}])) || + {Name,Port,Dir} <- FluInfo], + [Proxy_a, Proxy_b] = Proxies = + [element(2,?FLU_PC:start_link(P)) || P <- P_s], + MembersDict = machi_projection:make_members_dict(P_s), + {ok, Ma} = ?MGR:start_link(a, MembersDict, [{active_mode, false}]), + {ok, Mb} = ?MGR:start_link(b, MembersDict, [{active_mode, false}]), try +io:format(user, "LINE ~p\n", [?LINE]), {ok, P1} = ?MGR:test_calc_projection(Ma, false), - P1a = ?MGR:update_projection_checksum( - P1#projection_v1{down=[b], upi=[a], dbg=[{hackhack, ?LINE}]}), - P1b = ?MGR:update_projection_checksum( - P1#projection_v1{author_server=b, creation_time=now(), - down=[a], upi=[b], dbg=[{hackhack, ?LINE}]}), - P1Epoch = P1#projection_v1.epoch_number, - ok = machi_flu0:proj_write(FLUa, P1Epoch, public, P1a), - ok = machi_flu0:proj_write(FLUb, P1Epoch, public, P1b), + P1a = machi_projection:update_checksum( + P1#projection_v1{down=[b], upi=[a], dbg=[{hackhack, ?LINE}]}), + P1b = machi_projection:update_checksum( + P1#projection_v1{author_server=b, creation_time=now(), + down=[a], upi=[b], dbg=[{hackhack, ?LINE}]}), + %% Scribble different projections + ok = ?FLU_PC:write_projection(Proxy_a, public, P1a), + ok = ?FLU_PC:write_projection(Proxy_b, public, P1b), +io:format(user, "LINE ~p\n", [?LINE]), ?D(x), {not_unanimous,_,_}=_XX = ?MGR:test_read_latest_public_projection(Ma, false), 
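    %% A small sketch (using the proxy calls that appear elsewhere in this
    %% series) of how the disagreement could be inspected directly: each
    %% FLU's public store now holds a different projection at P1's epoch,
    %% which is why the read above reports not_unanimous.
    %%
    %%   Epoch1 = P1#projection_v1.epoch_number,
    %%   {ok, Pub_a} = ?FLU_PC:read_projection(Proxy_a, public, Epoch1),
    %%   {ok, Pub_b} = ?FLU_PC:read_projection(Proxy_b, public, Epoch1),
    %%   true = (Pub_a#projection_v1.upi /= Pub_b#projection_v1.upi),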
@@ -220,27 +229,40 @@ nonunanimous_setup_and_fix_testTODO() -> %% we expect nothing to change when called again. {not_unanimous,_,_}=_YY = ?MGR:test_read_latest_public_projection(Ma, true), +io:format(user, "LINE ~p\n", [?LINE]), + _ = ?MGR:test_react_to_env(Ma), +io:format(user, "LINE ~p\n", [?LINE]), {now_using, _} = ?MGR:test_react_to_env(Ma), +io:format(user, "LINE ~p\n", [?LINE]), {unanimous,P2,E2} = ?MGR:test_read_latest_public_projection(Ma, false), - {ok, P2pa} = machi_flu0:proj_read_latest(FLUa, private), +io:format(user, "LINE ~p\n", [?LINE]), + {ok, P2pa} = ?FLU_PC:read_latest_projection(Proxy_a, private), +io:format(user, "LINE ~p\n", [?LINE]), P2 = P2pa#projection_v1{dbg2=[]}, +io:format(user, "LINE ~p\n", [?LINE]), - %% FLUb should still be using proj #0 for its private use - {ok, P0pb} = machi_flu0:proj_read_latest(FLUb, private), - 0 = P0pb#projection_v1.epoch_number, + %% FLUb should have nothing written to private because it hasn't + %% reacted yet. + {error, not_written} = ?FLU_PC:read_latest_projection(Proxy_b, private), +io:format(user, "LINE ~p\n", [?LINE]), %% Poke FLUb to react ... should be using the same private proj %% as FLUa. {now_using, _} = ?MGR:test_react_to_env(Mb), - {ok, P2pb} = machi_flu0:proj_read_latest(FLUb, private), +io:format(user, "LINE ~p\n", [?LINE]), + {ok, P2pb} = ?FLU_PC:read_latest_projection(Proxy_b, private), +io:format(user, "LINE ~p\n", [?LINE]), +io:format(user, "P2 ~p\n", [machi_projection:make_summary(P2)]), +io:format(user, "P2pb ~p\n", [machi_projection:make_summary(P2pb)]), P2 = P2pb#projection_v1{dbg2=[]}, +io:format(user, "LINE ~p\n", [?LINE]), ok after ok = ?MGR:stop(Ma), ok = ?MGR:stop(Mb), - ok = machi_flu0:stop(FLUa), - ok = machi_flu0:stop(FLUb), + [ok = ?FLU_PC:quit(X) || X <- Proxies], + [ok = machi_flu1:stop(X) || X <- FLUs], ok = machi_partition_simulator:stop() end. From 6cd9dfc97774d223857dcbece5f7f81688a61496 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 17:47:43 +0900 Subject: [PATCH 14/22] WIP: nonunanimous_setup_and_fix_test() passes --- src/machi_chain_manager1.erl | 9 +++++++-- test/machi_chain_manager1_test.erl | 32 ++++++++++-------------------- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index 50e3469..3859192 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -666,7 +666,9 @@ react_to_env_A40(Retries, P_newprop, P_latest, LatestUnanimousP, P_newprop#projection_v1.down), if - P_latest#projection_v1.epoch_number > P_current#projection_v1.epoch_number + (P_current#projection_v1.epoch_number > 0 + andalso + P_latest#projection_v1.epoch_number > P_current#projection_v1.epoch_number) orelse not LatestUnanimousP -> ?REACT({a40, ?LINE, @@ -784,7 +786,7 @@ react_to_env_A50(P_latest, S) -> react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, Rank_newprop, Rank_latest, - #ch_mgr{name=MyName, flap_limit=FlapLimit}=S) -> + #ch_mgr{name=MyName, flap_limit=FlapLimit}=S)-> ?REACT(b10), {_P_newprop_flap_time, P_newprop_flap_count} = get_flap_count(P_newprop), @@ -896,6 +898,9 @@ react_to_env_C100(P_newprop, P_latest, case {ShortCircuit_p, projection_transition_is_sane(P_current, P_latest, MyName)} of + _ when P_current#projection_v1.epoch_number =< 0 -> + ?REACT({c100, ?LINE, [first_write]}), + react_to_env_C110(P_latest, S); {true, _} -> %% Someone else believes that I am repairing. 
We assume %% that nobody is being Byzantine, so we'll believe that I diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 20cf93c..384d267 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -204,10 +204,11 @@ nonunanimous_setup_and_fix_test() -> [Proxy_a, Proxy_b] = Proxies = [element(2,?FLU_PC:start_link(P)) || P <- P_s], MembersDict = machi_projection:make_members_dict(P_s), - {ok, Ma} = ?MGR:start_link(a, MembersDict, [{active_mode, false}]), - {ok, Mb} = ?MGR:start_link(b, MembersDict, [{active_mode, false}]), + XX = [], + %% XX = [{private_write_verbose,true}], + {ok, Ma} = ?MGR:start_link(a, MembersDict, [{active_mode, false}]++XX), + {ok, Mb} = ?MGR:start_link(b, MembersDict, [{active_mode, false}]++XX), try -io:format(user, "LINE ~p\n", [?LINE]), {ok, P1} = ?MGR:test_calc_projection(Ma, false), P1a = machi_projection:update_checksum( @@ -218,44 +219,31 @@ io:format(user, "LINE ~p\n", [?LINE]), %% Scribble different projections ok = ?FLU_PC:write_projection(Proxy_a, public, P1a), ok = ?FLU_PC:write_projection(Proxy_b, public, P1b), -io:format(user, "LINE ~p\n", [?LINE]), - ?D(x), + %% ?D(x), {not_unanimous,_,_}=_XX = ?MGR:test_read_latest_public_projection(Ma, false), - ?Dw(_XX), + %% ?Dw(_XX), {not_unanimous,_,_}=_YY = ?MGR:test_read_latest_public_projection(Ma, true), %% The read repair here doesn't automatically trigger the creation of %% a new projection (to try to create a unanimous projection). So %% we expect nothing to change when called again. {not_unanimous,_,_}=_YY = ?MGR:test_read_latest_public_projection(Ma, true), -io:format(user, "LINE ~p\n", [?LINE]), - _ = ?MGR:test_react_to_env(Ma), -io:format(user, "LINE ~p\n", [?LINE]), - {now_using, _} = ?MGR:test_react_to_env(Ma), -io:format(user, "LINE ~p\n", [?LINE]), - {unanimous,P2,E2} = ?MGR:test_read_latest_public_projection(Ma, false), -io:format(user, "LINE ~p\n", [?LINE]), + {now_using, EpochNum_a} = ?MGR:test_react_to_env(Ma), + {no_change, EpochNum_a} = ?MGR:test_react_to_env(Ma), + {unanimous,P2,_E2} = ?MGR:test_read_latest_public_projection(Ma, false), {ok, P2pa} = ?FLU_PC:read_latest_projection(Proxy_a, private), -io:format(user, "LINE ~p\n", [?LINE]), P2 = P2pa#projection_v1{dbg2=[]}, -io:format(user, "LINE ~p\n", [?LINE]), %% FLUb should have nothing written to private because it hasn't %% reacted yet. {error, not_written} = ?FLU_PC:read_latest_projection(Proxy_b, private), -io:format(user, "LINE ~p\n", [?LINE]), %% Poke FLUb to react ... should be using the same private proj %% as FLUa. 
- {now_using, _} = ?MGR:test_react_to_env(Mb), -io:format(user, "LINE ~p\n", [?LINE]), + {now_using, EpochNum_a} = ?MGR:test_react_to_env(Mb), {ok, P2pb} = ?FLU_PC:read_latest_projection(Proxy_b, private), -io:format(user, "LINE ~p\n", [?LINE]), -io:format(user, "P2 ~p\n", [machi_projection:make_summary(P2)]), -io:format(user, "P2pb ~p\n", [machi_projection:make_summary(P2pb)]), P2 = P2pb#projection_v1{dbg2=[]}, -io:format(user, "LINE ~p\n", [?LINE]), ok after From 2b1eb9b144897cb57529090e1c81d371fd872e68 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 18:08:29 +0900 Subject: [PATCH 15/22] WIP: Move convergence demo to new module machi_chain_manager1_converg_demo.erl --- test/machi_chain_manager1_converge_demo.erl | 412 ++++++++++++++++++++ test/machi_chain_manager1_test.erl | 360 ----------------- 2 files changed, 412 insertions(+), 360 deletions(-) create mode 100644 test/machi_chain_manager1_converge_demo.erl diff --git a/test/machi_chain_manager1_converge_demo.erl b/test/machi_chain_manager1_converge_demo.erl new file mode 100644 index 0000000..963e76d --- /dev/null +++ b/test/machi_chain_manager1_converge_demo.erl @@ -0,0 +1,412 @@ +%% ------------------------------------------------------------------- +%% +%% Machi: a small village of replicated files +%% +%% Copyright (c) 2014 Basho Technologies, Inc. All Rights Reserved. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(machi_chain_manager1_converge_demo). + +-include("machi.hrl"). +-include("machi_projection.hrl"). + +-define(MGR, machi_chain_manager1). + +-define(D(X), io:format(user, "~s ~p\n", [??X, X])). +-define(Dw(X), io:format(user, "~s ~w\n", [??X, X])). +-define(FLU_C, machi_flu1_client). +-define(FLU_PC, machi_proxy_flu1_client). + +-compile(export_all). + +-ifdef(TEST). + +-ifdef(EQC). +-include_lib("eqc/include/eqc.hrl"). +%% -include_lib("eqc/include/eqc_statem.hrl"). +-define(QC_OUT(P), + eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)). +-endif. + +-include_lib("eunit/include/eunit.hrl"). + +short_doc() -> +" +A visualization of the convergence behavior of the chain self-management +algorithm for Machi. + 1. Set up 4 FLUs and chain manager pairs. + 2. Create a number of different network partition scenarios, where + (simulated) partitions may be symmetric or asymmetric. Then halt changing + the partitions and keep the simulated network stable and broken. + 3. Run a number of iterations of the algorithm in parallel by poking each + of the manager processes on a random'ish basis. + 4. Afterward, fetch the chain transition changes made by each FLU and + verify that no transition was unsafe. + +During the iteration periods, the following is a cheatsheet for the output. +See the internal source for interpreting the rest of the output. + + 'Let loose the dogs of war!' 
Network instability + 'SET partitions = ' Network stability (but broken) + 'x uses:' The FLU x has made an internal state transition. The rest of + the line is a dump of internal state. + '{t}' This is a tick event which triggers one of the manager processes + to evaluate its environment and perhaps make a state transition. + +A long chain of '{t}{t}{t}{t}' means that the chain state has settled +to a stable configuration, which is the goal of the algorithm. +Press control-c to interrupt....". + +long_doc() -> + " +'Let loose the dogs of war!' + + The simulated network is very unstable for a few seconds. + +'x uses' + + After a single iteration, server x has determined that the chain + should be defined by the upi, repair, and down list in this record. + If all participants reach the same conclusion at the same epoch + number (and checksum, see next item below), then the chain is + stable, fully configured, and can provide full service. + +'epoch,E' + + The epoch number for this decision is E. The checksum of the full + record is not shown. For purposes of the protocol, a server will + 'wedge' itself and refuse service (until a new config is chosen) + whenever: a). it sees a bigger epoch number mentioned somewhere, or + b). it sees the same epoch number but a different checksum. In case + of b), there was a network partition that has healed, and both sides + had chosen to operate with an identical epoch number but different + chain configs. + +'upi', 'repair', and 'down' + + Members in the chain that are fully in sync and thus preserving the + Update Propagation Invariant, up but under repair (simulated), and + down, respectively. + +'ps,[some list]' + + The list of asymmetric network partitions. {a,b} means that a + cannot send to b, but b can send to a. + + This partition list is recorded for debugging purposes but is *not* + used by the algorithm. The algorithm only 'feels' its effects via + simulated timeout whenever there's a partition in one of the + messaging directions. + +'nodes_up,[list]' + + The best guess right now of which ndoes are up, relative to the + author node, specified by '{author,X}' + +'SET partitions = [some list]' + + All subsequent iterations should have a stable list of partitions, + i.e. the 'ps' list described should be stable. + +'{FLAP: x flaps n}!' + + Server x has detected that it's flapping/oscillating after iteration + n of a naive/1st draft detection algorithm. +". + +%% convergence_demo_test_() -> +%% {timeout, 98*300, fun() -> convergence_demo_testfun() end}. + +%% convergence_demo_testfun() -> +%% convergence_demo_testfun(3). + +t() -> + t(3). + +t(N) -> + convergence_demo_testfun(N). 
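+%% A minimal usage sketch of the simulator/manager calls exercised in this
+%% module; the helper name and the single partition below are illustrative
+%% only. Per the notes above, {a,b} means a cannot send to b while b can
+%% still send to a. Freeze the simulated network on that one asymmetric
+%% partition, poke one manager, then heal.
+
+t_one_partition_sketch(MgrPid) ->
+    machi_partition_simulator:always_these_partitions([{a,b}]),
+    Res = ?MGR:test_react_to_env(MgrPid),
+    machi_partition_simulator:no_partitions(),
+    Res.
+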
+ +convergence_demo_testfun(NumFLUs) -> + timer:sleep(100), + io:format(user, short_doc(), []), + %% Faster test startup, commented: timer:sleep(3000), + + FLU_biglist = [a,b,c,d,e,f,g], + All_list = lists:sublist(FLU_biglist, NumFLUs), + io:format(user, "\nSET # of FLus = ~w members ~w).\n", + [NumFLUs, All_list]), + machi_partition_simulator:start_link({111,222,33}, 0, 100), + _ = machi_partition_simulator:get(All_list), + + Namez = + [begin + {ok, Pid} = machi_flu0:start_link(Name), + {Name, Pid} + end || Name <- All_list ], + + MgrOpts = [private_write_verbose], + MgrNamez = + [begin + {ok, MPid} = ?MGR:start_link(Name, All_list, FLUPid, MgrOpts), + {Name, MPid} + end || {Name, FLUPid} <- Namez], + try + [{_, Ma}|_] = MgrNamez, + {ok, P1} = ?MGR:test_calc_projection(Ma, false), + P1Epoch = P1#projection_v1.epoch_number, + [ok = machi_flu0:proj_write(FLUPid, P1Epoch, public, P1) || + {_, FLUPid} <- Namez, FLUPid /= Ma], + + machi_partition_simulator:reset_thresholds(10, 50), + _ = machi_partition_simulator:get(All_list), + + Parent = self(), + DoIt = fun(Iters, S_min, S_max) -> + io:format(user, "\nDoIt: top\n\n", []), + Pids = [spawn(fun() -> + random:seed(now()), + [begin + erlang:yield(), + S_max_rand = random:uniform( + S_max + 1), + io:format(user, "{t}", []), + Elapsed = + ?MGR:sleep_ranked_order( + S_min, S_max_rand, + M_name, All_list), + _ = ?MGR:test_react_to_env(MMM), + %% if M_name == d -> + %% [_ = ?MGR:test_react_to_env(MMM) || + %% _ <- lists:seq(1,3)], + %% superunfair; + %% true -> + %% ok + %% end, + %% Be more unfair by not + %% sleeping here. + %% timer:sleep(S_max - Elapsed), + Elapsed + end || _ <- lists:seq(1, Iters)], + Parent ! done + end) || {M_name, MMM} <- MgrNamez ], + [receive + done -> + ok + after 995000 -> + exit(icky_timeout) + end || _ <- Pids] + end, + + _XandYs1 = [[{X,Y}] || X <- All_list, Y <- All_list, X /= Y], + _XandYs2 = [[{X,Y}, {A,B}] || X <- All_list, Y <- All_list, X /= Y, + A <- All_list, B <- All_list, A /= B, + X /= A], + _XandYs3 = [[{X,Y}, {A,B}, {C,D}] || X <- All_list, Y <- All_list, X /= Y, + A <- All_list, B <- All_list, A /= B, + C <- All_list, D <- All_list, C /= D, + X /= A, X /= C, A /= C], + %% AllPartitionCombinations = _XandYs1 ++ _XandYs2, + %% AllPartitionCombinations = _XandYs3, + AllPartitionCombinations = _XandYs1 ++ _XandYs2 ++ _XandYs3, + ?D({?LINE, length(AllPartitionCombinations)}), + + machi_partition_simulator:reset_thresholds(10, 50), + io:format(user, "\nLet loose the dogs of war!\n", []), + DoIt(30, 0, 0), + [begin + io:format(user, "\nSET partitions = ~w.\n", [ [] ]),machi_partition_simulator:no_partitions(), + [DoIt(50, 10, 100) || _ <- [1,2,3]], + + %% machi_partition_simulator:reset_thresholds(10, 50), + %% io:format(user, "\nLet loose the dogs of war!\n", []), + %% DoIt(30, 0, 0), + + machi_partition_simulator:always_these_partitions(Partition), + io:format(user, "\nSET partitions = ~w.\n", [Partition]), + [DoIt(50, 10, 100) || _ <- [1,2,3,4] ], + PPP = + [begin + PPPallPubs = machi_flu0:proj_list_all(FLU, public), + [begin + {ok, Pr} = machi_flu0:proj_read(FLU, PPPepoch, public), + {Pr#projection_v1.epoch_number, FLUName, Pr} + end || PPPepoch <- PPPallPubs] + end || {FLUName, FLU} <- Namez], + io:format(user, "PPP ~p\n", [lists:sort(lists:append(PPP))]), + + %%%%%%%% {stable,true} = {stable,private_projections_are_stable(Namez, DoIt)}, + {hosed_ok,true} = {hosed_ok,all_hosed_lists_are_identical(Namez, Partition)}, + io:format(user, "\nSweet, all_hosed are identical-or-islands-inconclusive.\n", []), + 
timer:sleep(1000), + ok + end || Partition <- AllPartitionCombinations + %% end || Partition <- [ [{a,b},{b,d},{c,b}], + %% [{a,b},{b,d},{c,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], + %% %% [{a,b},{b,d},{c,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], + %% [{a,b},{b,d},{c,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], + %% [{a,b},{b,d},{c,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] + %% end || Partition <- [ [{a,b}, {b,c}], + %% [{a,b}, {c,b}] ] + %% end || Partition <- [ [{a,b}, {b,c}] ] %% hosed-not-equal @ 3 FLUs + %% end || Partition <- [ [{a,b}], + %% [{b,a}] ] + %% end || Partition <- [ [{a,b}, {c,b}], + %% [{a,b}, {b,c}] ] + %% end || Partition <- [ [{a,b}, {b,c}, {c,d}], + %% [{a,b}, {b,c},{b,d}, {c,d}], + %% [{b,a}, {b,c}, {c,d}], + %% [{a,b}, {c,b}, {c,d}], + %% [{a,b}, {b,c}, {d,c}] ] + %% end || Partition <- [ [{a,b}, {b,c}, {c,d}, {d,e}], + %% [{b,a}, {b,c}, {c,d}, {d,e}], + %% [{a,b}, {c,b}, {c,d}, {d,e}], + %% [{a,b}, {b,c}, {d,c}, {d,e}], + %% [{a,b}, {b,c}, {c,d}, {e,d}] ] + %% end || Partition <- [ [{c,a}] ] + %% end || Partition <- [ [{c,a}], [{c,b}, {a, b}] ] + %% end || Partition <- [ [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}], + %% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {b,c}], + %% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {c,d}] ] + %% end || Partition <- [ [{a,b}], + %% [{a,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], + %% [{a,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], + %% [{a,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], + %% [{a,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] + ], + %% exit(end_experiment), + + io:format(user, "\nSET partitions = []\n", []), + io:format(user, "We should see convergence to 1 correct chain.\n", []), + machi_partition_simulator:no_partitions(), + [DoIt(50, 10, 100) || _ <- [1]], + io:format(user, "Sweet, finishing early\n", []), exit(yoyoyo_testing_hack), + %% WARNING: In asymmetric partitions, private_projections_are_stable() + %% will never be true; code beyond this point on the -exp3 + %% branch is bit-rotted, sorry! + true = private_projections_are_stable(Namez, DoIt), + io:format(user, "~s\n", [os:cmd("date")]), + + %% We are stable now ... analyze it. + + %% Create a report where at least one FLU has written a + %% private projection. + Report = machi_chain_manager1_test:unanimous_report(Namez), + %% ?D(Report), + + %% Report is ordered by Epoch. For each private projection + %% written during any given epoch, confirm that all chain + %% members appear in only one unique chain, i.e., the sets of + %% unique chains are disjoint. + true = machi_chain_manager1_test:all_reports_are_disjoint(Report), + + %% Given the report, we flip it around so that we observe the + %% sets of chain transitions relative to each FLU. + R_Chains = [machi_chain_manager1_test:extract_chains_relative_to_flu( + FLU, Report) || FLU <- All_list], + %% ?D(R_Chains), + R_Projs = [{FLU, [machi_chain_manager1_test:chain_to_projection( + FLU, Epoch, UPI, Repairing, All_list) || + {Epoch, UPI, Repairing} <- E_Chains]} || + {FLU, E_Chains} <- R_Chains], + + %% For each chain transition experienced by a particular FLU, + %% confirm that each state transition is OK. 
+ try + [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane(Ps, FLU)} || + {FLU, Ps} <- R_Projs], + io:format(user, "\nAll sanity checks pass, hooray!\n", []) + catch _Err:_What -> + io:format(user, "Report ~p\n", [Report]), + exit({line, ?LINE, _Err, _What}) + end, + %% ?D(R_Projs), + + ok + after + [ok = ?MGR:stop(MgrPid) || {_, MgrPid} <- MgrNamez], + [ok = machi_flu0:stop(FLUPid) || {_, FLUPid} <- Namez], + ok = machi_partition_simulator:stop() + end. + +private_projections_are_stable(Namez, PollFunc) -> + Private1 = [machi_flu0:proj_get_latest_num(FLU, private) || + {_Name, FLU} <- Namez], + PollFunc(5, 1, 10), + Private2 = [machi_flu0:proj_get_latest_num(FLU, private) || + {_Name, FLU} <- Namez], + true = (Private1 == Private2). + +all_hosed_lists_are_identical(Namez, Partition0) -> + Partition = lists:usort(Partition0), + Ps = [machi_flu0:proj_read_latest(FLU, private) || {_Name, FLU} <- Namez], + UniqueAllHoseds = lists:usort([machi_chain_manager1:get_all_hosed(P) || + {ok, P} <- Ps]), + Members = [M || {M, _Pid} <- Namez], + Islands = machi_partition_simulator:partitions2num_islands( + Members, Partition), + %% io:format(user, "all_hosed_lists_are_identical:\n", []), + %% io:format(user, " Uniques = ~p Islands ~p\n Partition ~p\n", + %% [Uniques, Islands, Partition]), + case length(UniqueAllHoseds) of + 1 -> + true; + %% TODO: With the addition of the digraph stuff below, the clause + %% below probably isn't necessary anymore, since the + %% digraph calculation should catch complete partition islands? + _ when Islands == 'many' -> + %% There are at least two partitions, so yes, it's quite + %% possible that the all_hosed lists may differ. + %% TODO Fix this up to be smarter about fully-isolated + %% islands of partition. + true; + _ -> + DG = digraph:new(), + Connection = machi_partition_simulator:partition2connection( + Members, Partition), + [digraph:add_vertex(DG, X) || X <- Members], + [digraph:add_edge(DG, X, Y) || {X,Y} <- Connection], + Any = + lists:any( + fun(X) -> + NotX = Members -- [X], + lists:any( + fun(Y) -> + %% There must be a shortest path of length + %% two in both directions, otherwise + %% the read projection call will fail. + %% And it's that failure that we're + %% interested in here. + XtoY = digraph:get_short_path(DG, X, Y), + YtoX = digraph:get_short_path(DG, Y, X), + (XtoY == false orelse + length(XtoY) > 2) + orelse + (YtoX == false orelse + length(YtoX) > 2) + end, NotX) + end, Members), + digraph:delete(DG), + if Any == true -> + %% There's a missing path of length 2 between some + %% two FLUs, so yes, there's going to be + %% non-identical all_hosed lists. + true; + true -> + false % There's no excuse, buddy + end + end. +-endif. % TEST diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 384d267..20ec7f2 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -254,365 +254,5 @@ nonunanimous_setup_and_fix_test() -> ok = machi_partition_simulator:stop() end. -short_doc() -> -" -A visualization of the convergence behavior of the chain self-management -algorithm for Machi. - 1. Set up 4 FLUs and chain manager pairs. - 2. Create a number of different network partition scenarios, where - (simulated) partitions may be symmetric or asymmetric. Then halt changing - the partitions and keep the simulated network stable and broken. - 3. Run a number of iterations of the algorithm in parallel by poking each - of the manager processes on a random'ish basis. - 4. 
Afterward, fetch the chain transition changes made by each FLU and - verify that no transition was unsafe. - -During the iteration periods, the following is a cheatsheet for the output. -See the internal source for interpreting the rest of the output. - - 'Let loose the dogs of war!' Network instability - 'SET partitions = ' Network stability (but broken) - 'x uses:' The FLU x has made an internal state transition. The rest of - the line is a dump of internal state. - '{t}' This is a tick event which triggers one of the manager processes - to evaluate its environment and perhaps make a state transition. - -A long chain of '{t}{t}{t}{t}' means that the chain state has settled -to a stable configuration, which is the goal of the algorithm. -Press control-c to interrupt....". - -long_doc() -> - " -'Let loose the dogs of war!' - - The simulated network is very unstable for a few seconds. - -'x uses' - - After a single iteration, server x has determined that the chain - should be defined by the upi, repair, and down list in this record. - If all participants reach the same conclusion at the same epoch - number (and checksum, see next item below), then the chain is - stable, fully configured, and can provide full service. - -'epoch,E' - - The epoch number for this decision is E. The checksum of the full - record is not shown. For purposes of the protocol, a server will - 'wedge' itself and refuse service (until a new config is chosen) - whenever: a). it sees a bigger epoch number mentioned somewhere, or - b). it sees the same epoch number but a different checksum. In case - of b), there was a network partition that has healed, and both sides - had chosen to operate with an identical epoch number but different - chain configs. - -'upi', 'repair', and 'down' - - Members in the chain that are fully in sync and thus preserving the - Update Propagation Invariant, up but under repair (simulated), and - down, respectively. - -'ps,[some list]' - - The list of asymmetric network partitions. {a,b} means that a - cannot send to b, but b can send to a. - - This partition list is recorded for debugging purposes but is *not* - used by the algorithm. The algorithm only 'feels' its effects via - simulated timeout whenever there's a partition in one of the - messaging directions. - -'nodes_up,[list]' - - The best guess right now of which ndoes are up, relative to the - author node, specified by '{author,X}' - -'SET partitions = [some list]' - - All subsequent iterations should have a stable list of partitions, - i.e. the 'ps' list described should be stable. - -'{FLAP: x flaps n}!' - - Server x has detected that it's flapping/oscillating after iteration - n of a naive/1st draft detection algorithm. -". - -convergence_demo_testTODO_() -> - {timeout, 98*300, fun() -> convergence_demo_testfun() end}. - -convergence_demo_testfun() -> - convergence_demo_testfun(3). 
- -convergence_demo_testfun(NumFLUs) -> - timer:sleep(100), - io:format(user, short_doc(), []), - %% Faster test startup, commented: timer:sleep(3000), - - FLU_biglist = [a,b,c,d,e,f,g], - All_list = lists:sublist(FLU_biglist, NumFLUs), - io:format(user, "\nSET # of FLus = ~w members ~w).\n", - [NumFLUs, All_list]), - machi_partition_simulator:start_link({111,222,33}, 0, 100), - _ = machi_partition_simulator:get(All_list), - - Namez = - [begin - {ok, Pid} = machi_flu0:start_link(Name), - {Name, Pid} - end || Name <- All_list ], - - MgrOpts = [private_write_verbose], - MgrNamez = - [begin - {ok, MPid} = ?MGR:start_link(Name, All_list, FLUPid, MgrOpts), - {Name, MPid} - end || {Name, FLUPid} <- Namez], - try - [{_, Ma}|_] = MgrNamez, - {ok, P1} = ?MGR:test_calc_projection(Ma, false), - P1Epoch = P1#projection_v1.epoch_number, - [ok = machi_flu0:proj_write(FLUPid, P1Epoch, public, P1) || - {_, FLUPid} <- Namez, FLUPid /= Ma], - - machi_partition_simulator:reset_thresholds(10, 50), - _ = machi_partition_simulator:get(All_list), - - Parent = self(), - DoIt = fun(Iters, S_min, S_max) -> - io:format(user, "\nDoIt: top\n\n", []), - Pids = [spawn(fun() -> - random:seed(now()), - [begin - erlang:yield(), - S_max_rand = random:uniform( - S_max + 1), - io:format(user, "{t}", []), - Elapsed = - ?MGR:sleep_ranked_order( - S_min, S_max_rand, - M_name, All_list), - _ = ?MGR:test_react_to_env(MMM), - %% if M_name == d -> - %% [_ = ?MGR:test_react_to_env(MMM) || - %% _ <- lists:seq(1,3)], - %% superunfair; - %% true -> - %% ok - %% end, - %% Be more unfair by not - %% sleeping here. - %% timer:sleep(S_max - Elapsed), - Elapsed - end || _ <- lists:seq(1, Iters)], - Parent ! done - end) || {M_name, MMM} <- MgrNamez ], - [receive - done -> - ok - after 995000 -> - exit(icky_timeout) - end || _ <- Pids] - end, - - _XandYs1 = [[{X,Y}] || X <- All_list, Y <- All_list, X /= Y], - _XandYs2 = [[{X,Y}, {A,B}] || X <- All_list, Y <- All_list, X /= Y, - A <- All_list, B <- All_list, A /= B, - X /= A], - _XandYs3 = [[{X,Y}, {A,B}, {C,D}] || X <- All_list, Y <- All_list, X /= Y, - A <- All_list, B <- All_list, A /= B, - C <- All_list, D <- All_list, C /= D, - X /= A, X /= C, A /= C], - %% AllPartitionCombinations = _XandYs1 ++ _XandYs2, - %% AllPartitionCombinations = _XandYs3, - AllPartitionCombinations = _XandYs1 ++ _XandYs2 ++ _XandYs3, - ?D({?LINE, length(AllPartitionCombinations)}), - - machi_partition_simulator:reset_thresholds(10, 50), - io:format(user, "\nLet loose the dogs of war!\n", []), - DoIt(30, 0, 0), - [begin - io:format(user, "\nSET partitions = ~w.\n", [ [] ]),machi_partition_simulator:no_partitions(), - [DoIt(50, 10, 100) || _ <- [1,2,3]], - - %% machi_partition_simulator:reset_thresholds(10, 50), - %% io:format(user, "\nLet loose the dogs of war!\n", []), - %% DoIt(30, 0, 0), - - machi_partition_simulator:always_these_partitions(Partition), - io:format(user, "\nSET partitions = ~w.\n", [Partition]), - [DoIt(50, 10, 100) || _ <- [1,2,3,4] ], - PPP = - [begin - PPPallPubs = machi_flu0:proj_list_all(FLU, public), - [begin - {ok, Pr} = machi_flu0:proj_read(FLU, PPPepoch, public), - {Pr#projection_v1.epoch_number, FLUName, Pr} - end || PPPepoch <- PPPallPubs] - end || {FLUName, FLU} <- Namez], - io:format(user, "PPP ~p\n", [lists:sort(lists:append(PPP))]), - - %%%%%%%% {stable,true} = {stable,private_projections_are_stable(Namez, DoIt)}, - {hosed_ok,true} = {hosed_ok,all_hosed_lists_are_identical(Namez, Partition)}, - io:format(user, "\nSweet, all_hosed are identical-or-islands-inconclusive.\n", []), - 
timer:sleep(1000), - ok - end || Partition <- AllPartitionCombinations - %% end || Partition <- [ [{a,b},{b,d},{c,b}], - %% [{a,b},{b,d},{c,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], - %% %% [{a,b},{b,d},{c,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], - %% [{a,b},{b,d},{c,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], - %% [{a,b},{b,d},{c,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] - %% end || Partition <- [ [{a,b}, {b,c}], - %% [{a,b}, {c,b}] ] - %% end || Partition <- [ [{a,b}, {b,c}] ] %% hosed-not-equal @ 3 FLUs - %% end || Partition <- [ [{a,b}], - %% [{b,a}] ] - %% end || Partition <- [ [{a,b}, {c,b}], - %% [{a,b}, {b,c}] ] - %% end || Partition <- [ [{a,b}, {b,c}, {c,d}], - %% [{a,b}, {b,c},{b,d}, {c,d}], - %% [{b,a}, {b,c}, {c,d}], - %% [{a,b}, {c,b}, {c,d}], - %% [{a,b}, {b,c}, {d,c}] ] - %% end || Partition <- [ [{a,b}, {b,c}, {c,d}, {d,e}], - %% [{b,a}, {b,c}, {c,d}, {d,e}], - %% [{a,b}, {c,b}, {c,d}, {d,e}], - %% [{a,b}, {b,c}, {d,c}, {d,e}], - %% [{a,b}, {b,c}, {c,d}, {e,d}] ] - %% end || Partition <- [ [{c,a}] ] - %% end || Partition <- [ [{c,a}], [{c,b}, {a, b}] ] - %% end || Partition <- [ [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}], - %% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {b,c}], - %% [{a,b},{b,a}, {a,c},{c,a}, {a,d},{d,a}, {c,d}] ] - %% end || Partition <- [ [{a,b}], - %% [{a,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], - %% [{a,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], - %% [{a,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], - %% [{a,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] - ], - %% exit(end_experiment), - - io:format(user, "\nSET partitions = []\n", []), - io:format(user, "We should see convergence to 1 correct chain.\n", []), - machi_partition_simulator:no_partitions(), - [DoIt(50, 10, 100) || _ <- [1]], - io:format(user, "Sweet, finishing early\n", []), exit(yoyoyo_testing_hack), - %% WARNING: In asymmetric partitions, private_projections_are_stable() - %% will never be true; code beyond this point on the -exp3 - %% branch is bit-rotted, sorry! - true = private_projections_are_stable(Namez, DoIt), - io:format(user, "~s\n", [os:cmd("date")]), - - %% We are stable now ... analyze it. - - %% Create a report where at least one FLU has written a - %% private projection. - Report = unanimous_report(Namez), - %% ?D(Report), - - %% Report is ordered by Epoch. For each private projection - %% written during any given epoch, confirm that all chain - %% members appear in only one unique chain, i.e., the sets of - %% unique chains are disjoint. - true = all_reports_are_disjoint(Report), - - %% Given the report, we flip it around so that we observe the - %% sets of chain transitions relative to each FLU. - R_Chains = [extract_chains_relative_to_flu(FLU, Report) || - FLU <- All_list], - %% ?D(R_Chains), - R_Projs = [{FLU, [chain_to_projection(FLU, Epoch, UPI, Repairing, - All_list) || - {Epoch, UPI, Repairing} <- E_Chains]} || - {FLU, E_Chains} <- R_Chains], - - %% For each chain transition experienced by a particular FLU, - %% confirm that each state transition is OK. - try - [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane(Ps, FLU)} || - {FLU, Ps} <- R_Projs], - io:format(user, "\nAll sanity checks pass, hooray!\n", []) - catch _Err:_What -> - io:format(user, "Report ~p\n", [Report]), - exit({line, ?LINE, _Err, _What}) - end, - %% ?D(R_Projs), - - ok - after - [ok = ?MGR:stop(MgrPid) || {_, MgrPid} <- MgrNamez], - [ok = machi_flu0:stop(FLUPid) || {_, FLUPid} <- Namez], - ok = machi_partition_simulator:stop() - end. 
- -private_projections_are_stable(Namez, PollFunc) -> - Private1 = [machi_flu0:proj_get_latest_num(FLU, private) || - {_Name, FLU} <- Namez], - PollFunc(5, 1, 10), - Private2 = [machi_flu0:proj_get_latest_num(FLU, private) || - {_Name, FLU} <- Namez], - true = (Private1 == Private2). - -all_hosed_lists_are_identical(Namez, Partition0) -> - Partition = lists:usort(Partition0), - Ps = [machi_flu0:proj_read_latest(FLU, private) || {_Name, FLU} <- Namez], - UniqueAllHoseds = lists:usort([machi_chain_manager1:get_all_hosed(P) || - {ok, P} <- Ps]), - Members = [M || {M, _Pid} <- Namez], - Islands = machi_partition_simulator:partitions2num_islands( - Members, Partition), - %% io:format(user, "all_hosed_lists_are_identical:\n", []), - %% io:format(user, " Uniques = ~p Islands ~p\n Partition ~p\n", - %% [Uniques, Islands, Partition]), - case length(UniqueAllHoseds) of - 1 -> - true; - %% TODO: With the addition of the digraph stuff below, the clause - %% below probably isn't necessary anymore, since the - %% digraph calculation should catch complete partition islands? - _ when Islands == 'many' -> - %% There are at least two partitions, so yes, it's quite - %% possible that the all_hosed lists may differ. - %% TODO Fix this up to be smarter about fully-isolated - %% islands of partition. - true; - _ -> - DG = digraph:new(), - Connection = machi_partition_simulator:partition2connection( - Members, Partition), - [digraph:add_vertex(DG, X) || X <- Members], - [digraph:add_edge(DG, X, Y) || {X,Y} <- Connection], - Any = - lists:any( - fun(X) -> - NotX = Members -- [X], - lists:any( - fun(Y) -> - %% There must be a shortest path of length - %% two in both directions, otherwise - %% the read projection call will fail. - %% And it's that failure that we're - %% interested in here. - XtoY = digraph:get_short_path(DG, X, Y), - YtoX = digraph:get_short_path(DG, Y, X), - (XtoY == false orelse - length(XtoY) > 2) - orelse - (YtoX == false orelse - length(YtoX) > 2) - end, NotX) - end, Members), - digraph:delete(DG), - if Any == true -> - %% There's a missing path of length 2 between some - %% two FLUs, so yes, there's going to be - %% non-identical all_hosed lists. - true; - true -> - false % There's no excuse, buddy - end - end. - -endif. % not PULSE -endif. % TEST From 1984c3c35069fec2a35d9c095811b0ddb93bec4f Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 21:08:15 +0900 Subject: [PATCH 16/22] WIP: convergence demo runs, but badly! --- src/machi_chain_manager1.erl | 4 +- test/machi_chain_manager1_converge_demo.erl | 70 +++++++++++++-------- 2 files changed, 46 insertions(+), 28 deletions(-) diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index 3859192..241bd1c 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -221,9 +221,9 @@ code_change(_OldVsn, S, _Extra) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% set_active_timer(#ch_mgr{name=MyName, members_dict=MembersDict}=S) -> - FLU_list = [P#p_srvr.name || P <- MembersDict], + FLU_list = [P#p_srvr.name || {_,P} <- orddict:to_list(MembersDict)], USec = calc_sleep_ranked_order(1000, 2000, MyName, FLU_list), - {ok, TRef} = timer:send_interval(USec), + {ok, TRef} = timer:send_interval(USec, yo_yo_yo), S#ch_mgr{timer=TRef}. 
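+%% A worked example of the ranked interval computed above, assuming the
+%% calc_sleep_ranked_order(MinSleep, MaxSleep, FLU, FLU_list) definition
+%% from earlier in this series: with FLU_list = [a,b,c], MinSleep = 1000,
+%% and MaxSleep = 2000, SleepChunk = 2000 div 3 = 666, so
+%%
+%%   calc_sleep_ranked_order(1000, 2000, a, [a,b,c]) -> 1000 + 666*2 = 2332
+%%   calc_sleep_ranked_order(1000, 2000, b, [a,b,c]) -> 1000 + 666*1 = 1666
+%%   calc_sleep_ranked_order(1000, 2000, c, [a,b,c]) -> 1000 + 666*0 = 1000
+%%
+%% i.e., servers later in the member list poll on a shorter interval.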
do_cl_write_public_proj(Proj, S) -> diff --git a/test/machi_chain_manager1_converge_demo.erl b/test/machi_chain_manager1_converge_demo.erl index 963e76d..909202b 100644 --- a/test/machi_chain_manager1_converge_demo.erl +++ b/test/machi_chain_manager1_converge_demo.erl @@ -128,11 +128,11 @@ long_doc() -> n of a naive/1st draft detection algorithm. ". -%% convergence_demo_test_() -> -%% {timeout, 98*300, fun() -> convergence_demo_testfun() end}. +convergence_demo_test_() -> + {timeout, 98*300, fun() -> convergence_demo_testfun() end}. -%% convergence_demo_testfun() -> -%% convergence_demo_testfun(3). +convergence_demo_testfun() -> + convergence_demo_testfun(3). t() -> t(3). @@ -142,33 +142,42 @@ t(N) -> convergence_demo_testfun(NumFLUs) -> timer:sleep(100), - io:format(user, short_doc(), []), + %% Faster test startup, commented: io:format(user, short_doc(), []), %% Faster test startup, commented: timer:sleep(3000), - FLU_biglist = [a,b,c,d,e,f,g], + TcpPort = 62877, + FluInfo = [{a,TcpPort+0,"./data.a"}, {b,TcpPort+1,"./data.b"}, + {c,TcpPort+2,"./data.c"}, {d,TcpPort+3,"./data.d"}, + {e,TcpPort+4,"./data.e"}, {f,TcpPort+5,"./data.f"}], + FLU_biglist = [X || {X,_,_} <- FluInfo], All_list = lists:sublist(FLU_biglist, NumFLUs), io:format(user, "\nSET # of FLus = ~w members ~w).\n", [NumFLUs, All_list]), machi_partition_simulator:start_link({111,222,33}, 0, 100), _ = machi_partition_simulator:get(All_list), - Namez = - [begin - {ok, Pid} = machi_flu0:start_link(Name), - {Name, Pid} - end || Name <- All_list ], - - MgrOpts = [private_write_verbose], + Ps = [#p_srvr{name=Name,address="localhost",port=Port} || + {Name,Port,_Dir} <- lists:sublist(FluInfo, NumFLUs)], + PsDirs = lists:zip(Ps, + [Dir || {_,_,Dir} <- lists:sublist(FluInfo, NumFLUs)]), + FLU_pids = [machi_flu1_test:setup_test_flu(Name, Port, Dir) || + {#p_srvr{name=Name,port=Port}=P, Dir} <- PsDirs], + Namez = [begin + {ok, PPid} = ?FLU_PC:start_link(P), + {Name, PPid} + end || {#p_srvr{name=Name,port=Port}=P, Dir} <- PsDirs], + MembersDict = machi_projection:make_members_dict(Ps), + MgrOpts = [private_write_verbose, {active_mode,false}], MgrNamez = [begin - {ok, MPid} = ?MGR:start_link(Name, All_list, FLUPid, MgrOpts), - {Name, MPid} - end || {Name, FLUPid} <- Namez], + {ok, MPid} = ?MGR:start_link(P#p_srvr.name, MembersDict, MgrOpts), + {P#p_srvr.name, MPid} + end || P <- Ps], + try [{_, Ma}|_] = MgrNamez, {ok, P1} = ?MGR:test_calc_projection(Ma, false), - P1Epoch = P1#projection_v1.epoch_number, - [ok = machi_flu0:proj_write(FLUPid, P1Epoch, public, P1) || + [ok = ?FLU_PC:write_projection(FLUPid, public, P1) || {_, FLUPid} <- Namez, FLUPid /= Ma], machi_partition_simulator:reset_thresholds(10, 50), @@ -240,20 +249,21 @@ convergence_demo_testfun(NumFLUs) -> [DoIt(50, 10, 100) || _ <- [1,2,3,4] ], PPP = [begin - PPPallPubs = machi_flu0:proj_list_all(FLU, public), + {ok, PPPallPubs} = ?FLU_PC:list_all_projections(FLU,public), [begin - {ok, Pr} = machi_flu0:proj_read(FLU, PPPepoch, public), + {ok, Pr} = ?FLU_PC:read_projection(FLU, + public, PPPepoch), {Pr#projection_v1.epoch_number, FLUName, Pr} end || PPPepoch <- PPPallPubs] end || {FLUName, FLU} <- Namez], - io:format(user, "PPP ~p\n", [lists:sort(lists:append(PPP))]), + %% io:format(user, "PPP ~p\n", [lists:sort(lists:append(PPP))]), %%%%%%%% {stable,true} = {stable,private_projections_are_stable(Namez, DoIt)}, {hosed_ok,true} = {hosed_ok,all_hosed_lists_are_identical(Namez, Partition)}, io:format(user, "\nSweet, all_hosed are identical-or-islands-inconclusive.\n", []), 
timer:sleep(1000), ok - end || Partition <- AllPartitionCombinations + %% end || Partition <- AllPartitionCombinations %% end || Partition <- [ [{a,b},{b,d},{c,b}], %% [{a,b},{b,d},{c,b}, {a,b},{b,a},{a,c},{c,a},{a,d},{d,a}], %% %% [{a,b},{b,d},{c,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], @@ -262,6 +272,7 @@ convergence_demo_testfun(NumFLUs) -> %% end || Partition <- [ [{a,b}, {b,c}], %% [{a,b}, {c,b}] ] %% end || Partition <- [ [{a,b}, {b,c}] ] %% hosed-not-equal @ 3 FLUs + end || Partition <- [ [{a,b}, {b,a}] ] %% end || Partition <- [ [{a,b}], %% [{b,a}] ] %% end || Partition <- [ [{a,b}, {c,b}], @@ -336,23 +347,30 @@ convergence_demo_testfun(NumFLUs) -> %% ?D(R_Projs), ok + catch + XX:YY -> + io:format(user, "BUMMER ~p ~p @ ~p\n", + [XX, YY, erlang:get_stacktrace()]), + exit({bummer,XX,YY}) after [ok = ?MGR:stop(MgrPid) || {_, MgrPid} <- MgrNamez], - [ok = machi_flu0:stop(FLUPid) || {_, FLUPid} <- Namez], + [ok = ?FLU_PC:quit(PPid) || {_, PPid} <- Namez], + [ok = ?FLU_C:stop(FLUPid) || FLUPid <- FLU_pids], ok = machi_partition_simulator:stop() end. private_projections_are_stable(Namez, PollFunc) -> - Private1 = [machi_flu0:proj_get_latest_num(FLU, private) || + Private1 = [?FLU_PC:get_latest_epoch(FLU, private) || {_Name, FLU} <- Namez], PollFunc(5, 1, 10), - Private2 = [machi_flu0:proj_get_latest_num(FLU, private) || + Private2 = [?FLU_PC:get_latest_epoch(FLU, private) || {_Name, FLU} <- Namez], true = (Private1 == Private2). all_hosed_lists_are_identical(Namez, Partition0) -> Partition = lists:usort(Partition0), - Ps = [machi_flu0:proj_read_latest(FLU, private) || {_Name, FLU} <- Namez], + Ps = [element(2,?FLU_PC:read_latest_projection(FLU, private)) || + {_Name, FLU} <- Namez], UniqueAllHoseds = lists:usort([machi_chain_manager1:get_all_hosed(P) || {ok, P} <- Ps]), Members = [M || {M, _Pid} <- Namez], From 4f7177067e7577971d1a6c88315c2e5eaf3e22b6 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Thu, 9 Apr 2015 21:32:04 +0900 Subject: [PATCH 17/22] WIP: Type fixups --- include/machi_chain_manager.hrl | 8 ++++---- include/machi_projection.hrl | 2 +- src/machi_chain_manager1.erl | 4 ++-- test/machi_chain_manager1_converge_demo.erl | 10 +++++----- test/machi_chain_manager1_test.erl | 7 ++++--- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git a/include/machi_chain_manager.hrl b/include/machi_chain_manager.hrl index 849dbdc..9382fa6 100644 --- a/include/machi_chain_manager.hrl +++ b/include/machi_chain_manager.hrl @@ -29,13 +29,13 @@ flap_limit :: non_neg_integer(), proj :: projection(), %% - timer :: 'undefined' | reference(), - proj_history :: queue(), + timer :: 'undefined' | timer:tref(), + proj_history :: queue:queue(), flaps=0 :: integer(), flap_start = ?NOT_FLAPPING - :: erlang:now(), + :: erlang:timestamp(), runenv :: list(), %proplist() opts :: list(), %proplist() members_dict :: p_srvr_dict(), - proxies_dict :: orddict:orddict(pv1_server(), pid()) + proxies_dict :: orddict:orddict() }). diff --git a/include/machi_projection.hrl b/include/machi_projection.hrl index ea007c5..dfe043c 100644 --- a/include/machi_projection.hrl +++ b/include/machi_projection.hrl @@ -36,7 +36,7 @@ }). -type p_srvr() :: #p_srvr{}. --type p_srvr_dict() :: orddict:orddict(pv1_server(), p_srvr()). +-type p_srvr_dict() :: orddict:orddict(). -define(DUMMY_PV1_EPOCH, {0,<<0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>}). 
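 %% For reference, a p_srvr_dict() as built by
 %% machi_projection:make_members_dict/1 is an orddict keyed by server name;
 %% a sketch with hypothetical localhost port numbers:
 %%
 %%   [{a, #p_srvr{name=a, proto=ipv4, address="localhost", port=4000}},
 %%    {b, #p_srvr{name=b, proto=ipv4, address="localhost", port=4001}}]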
diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index 241bd1c..5552942 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -192,8 +192,8 @@ handle_call({test_set_active, Boolean}, _From, #ch_mgr{timer=TRef}=S) -> {true, undefined} -> S2 = set_active_timer(S), {reply, ok, S2}; - {false, TRef} when is_reference(TRef) -> - timer:cancel(TRef), + {false, _} -> + (catch timer:cancel(TRef)), {reply, ok, S#ch_mgr{timer=undefined}}; _ -> {reply, error, S} diff --git a/test/machi_chain_manager1_converge_demo.erl b/test/machi_chain_manager1_converge_demo.erl index 909202b..4b8df9a 100644 --- a/test/machi_chain_manager1_converge_demo.erl +++ b/test/machi_chain_manager1_converge_demo.erl @@ -128,11 +128,11 @@ long_doc() -> n of a naive/1st draft detection algorithm. ". -convergence_demo_test_() -> - {timeout, 98*300, fun() -> convergence_demo_testfun() end}. +%% convergence_demo_test_() -> +%% {timeout, 98*300, fun() -> convergence_demo_testfun() end}. -convergence_demo_testfun() -> - convergence_demo_testfun(3). +%% convergence_demo_testfun() -> +%% convergence_demo_testfun(3). t() -> t(3). @@ -355,7 +355,7 @@ convergence_demo_testfun(NumFLUs) -> after [ok = ?MGR:stop(MgrPid) || {_, MgrPid} <- MgrNamez], [ok = ?FLU_PC:quit(PPid) || {_, PPid} <- Namez], - [ok = ?FLU_C:stop(FLUPid) || FLUPid <- FLU_pids], + [ok = machi_flu1:stop(FLUPid) || FLUPid <- FLU_pids], ok = machi_partition_simulator:stop() end. diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 20ec7f2..7152d40 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -48,12 +48,12 @@ unanimous_report(Namez) -> UniquePrivateEs = lists:usort(lists:flatten( - [machi_flu0:proj_list_all(FLU, private) || + [element(2, ?FLU_PC:list_all_projections(FLU, private)) || {_FLUName, FLU} <- Namez])), [unanimous_report(Epoch, Namez) || Epoch <- UniquePrivateEs]. unanimous_report(Epoch, Namez) -> - Projs = [{FLUName, case machi_flu0:proj_read(FLU, Epoch, private) of + Projs = [{FLUName, case ?FLU_PC:read_projection(FLU, private, Epoch) of {ok, T} -> T; _Else -> not_in_this_epoch end} || {FLUName, FLU} <- Namez], @@ -128,7 +128,8 @@ extract_chains_relative_to_flu(FLU, Report) -> lists:member(FLU, UPI) orelse lists:member(FLU, Repairing)]}. chain_to_projection(MyName, Epoch, UPI_list, Repairing_list, All_list) -> - ?MGR:make_projection(Epoch, MyName, All_list, + exit({todo_broken_fixme,?MODULE,?LINE}), + machi_projection:new(Epoch, MyName, All_list, All_list -- (UPI_list ++ Repairing_list), UPI_list, Repairing_list, []). 
From 4334c71a4da1b3e83375bd51c9af657eb3023a92 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Fri, 10 Apr 2015 11:08:17 +0900 Subject: [PATCH 18/22] WIP: compiler warning fixups --- test/machi_chain_manager1_converge_demo.erl | 12 ++++++------ test/machi_partition_simulator.erl | 2 ++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/test/machi_chain_manager1_converge_demo.erl b/test/machi_chain_manager1_converge_demo.erl index 4b8df9a..cb9176d 100644 --- a/test/machi_chain_manager1_converge_demo.erl +++ b/test/machi_chain_manager1_converge_demo.erl @@ -161,11 +161,11 @@ convergence_demo_testfun(NumFLUs) -> PsDirs = lists:zip(Ps, [Dir || {_,_,Dir} <- lists:sublist(FluInfo, NumFLUs)]), FLU_pids = [machi_flu1_test:setup_test_flu(Name, Port, Dir) || - {#p_srvr{name=Name,port=Port}=P, Dir} <- PsDirs], + {#p_srvr{name=Name,port=Port}, Dir} <- PsDirs], Namez = [begin {ok, PPid} = ?FLU_PC:start_link(P), {Name, PPid} - end || {#p_srvr{name=Name,port=Port}=P, Dir} <- PsDirs], + end || {#p_srvr{name=Name}=P, _Dir} <- PsDirs], MembersDict = machi_projection:make_members_dict(Ps), MgrOpts = [private_write_verbose, {active_mode,false}], MgrNamez = @@ -247,7 +247,7 @@ convergence_demo_testfun(NumFLUs) -> machi_partition_simulator:always_these_partitions(Partition), io:format(user, "\nSET partitions = ~w.\n", [Partition]), [DoIt(50, 10, 100) || _ <- [1,2,3,4] ], - PPP = + _PPP = [begin {ok, PPPallPubs} = ?FLU_PC:list_all_projections(FLU,public), [begin @@ -256,7 +256,7 @@ convergence_demo_testfun(NumFLUs) -> {Pr#projection_v1.epoch_number, FLUName, Pr} end || PPPepoch <- PPPallPubs] end || {FLUName, FLU} <- Namez], - %% io:format(user, "PPP ~p\n", [lists:sort(lists:append(PPP))]), + %% io:format(user, "PPP ~p\n", [lists:sort(lists:append(_PPP))]), %%%%%%%% {stable,true} = {stable,private_projections_are_stable(Namez, DoIt)}, {hosed_ok,true} = {hosed_ok,all_hosed_lists_are_identical(Namez, Partition)}, @@ -337,8 +337,8 @@ convergence_demo_testfun(NumFLUs) -> %% For each chain transition experienced by a particular FLU, %% confirm that each state transition is OK. try - [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane(Ps, FLU)} || - {FLU, Ps} <- R_Projs], + [{FLU, true} = {FLU, ?MGR:projection_transitions_are_sane(Psx, FLU)} || + {FLU, Psx} <- R_Projs], io:format(user, "\nAll sanity checks pass, hooray!\n", []) catch _Err:_What -> io:format(user, "Report ~p\n", [Report]), diff --git a/test/machi_partition_simulator.erl b/test/machi_partition_simulator.erl index 7ef70a3..c7a2f1e 100644 --- a/test/machi_partition_simulator.erl +++ b/test/machi_partition_simulator.erl @@ -198,6 +198,7 @@ partitions2num_islands(Members0, Partition0) -> %% Ignore duplicates in either arg, if any. Members = lists:usort(Members0), Partition = lists:usort(Partition0), +io:format(user, "\npartitions2num_islands(Members, Partition)\n~p ~p\n", [Members, Partition]), Connections = partition2connection(Members, Partition), Cs = [lists:member({X,Y}, Connections) @@ -206,6 +207,7 @@ partitions2num_islands(Members0, Partition0) -> X /= Y], case lists:usort(Cs) of [true] -> 1; + [false] -> many; [false, true] -> many % TODO too lazy to finish end. 
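 %% Reading the partitions2num_islands/2 change above: the usort of the
 %% pairwise connectivity checks can only be [true] (every ordered pair of
 %% members can still communicate, so one island), [false] (no pair can
 %% communicate, so many islands), or [false, true] (mixed), which the
 %% function also conservatively reports as 'many'.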
From 876bf798351811eb9ddf2de4003c6d5b33229fe5 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Fri, 10 Apr 2015 14:15:16 +0900 Subject: [PATCH 19/22] Add debugging & TODO note about using inner projection --- src/machi_chain_manager1.erl | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index 5552942..b3cbd44 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -934,12 +934,23 @@ react_to_env_C110(P_latest, #ch_mgr{name=MyName} = S) -> |Extra_todo]), MyNamePid = proxy_pid(MyName, S), + %% TODO: We need to fetch the inner projection, if it exists, and + %% write it to the private store. Put the outer projection + %% into dbg2 for forensics and perhaps re-start use? ok = ?FLU_PC:write_projection(MyNamePid, private, P_latest2, ?TO), case proplists:get_value(private_write_verbose, S#ch_mgr.opts) of true -> {_,_,C} = os:timestamp(), MSec = trunc(C / 1000), {HH,MM,SS} = time(), + case proplists:get_value(inner_projection, P_latest2#projection_v1.dbg) of + undefined -> + ok; + P_inner when is_record(P_inner, projection_v1) -> + io:format(user, "\n~2..0w:~2..0w:~2..0w.~3..0w ~p uses INNER: ~w\n", + [HH,MM,SS,MSec, S#ch_mgr.name, + machi_projection:make_summary(P_inner)]) + end, io:format(user, "\n~2..0w:~2..0w:~2..0w.~3..0w ~p uses: ~w\n", [HH,MM,SS,MSec, S#ch_mgr.name, machi_projection:make_summary(P_latest2)]); From 0b8ea13f7a6443369f88b19560f6a0cdfee75953 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Fri, 10 Apr 2015 21:59:56 +0900 Subject: [PATCH 20/22] WIP: some TODO cleanup & related refactoring --- include/machi_projection.hrl | 10 +- src/machi_chain_manager1.erl | 211 ++++++++++++-------- src/machi_projection_store.erl | 4 +- test/machi_chain_manager1_converge_demo.erl | 29 ++- test/machi_chain_manager1_test.erl | 6 +- test/machi_partition_simulator.erl | 1 - 6 files changed, 165 insertions(+), 96 deletions(-) diff --git a/include/machi_projection.hrl b/include/machi_projection.hrl index dfe043c..59baf03 100644 --- a/include/machi_projection.hrl +++ b/include/machi_projection.hrl @@ -43,15 +43,15 @@ -record(projection_v1, { epoch_number :: pv1_epoch_n(), epoch_csum :: pv1_csum(), - all_members :: [pv1_server()], - members_dict :: p_srvr_dict(), - down :: [pv1_server()], - creation_time :: pv1_timestamp(), author_server :: pv1_server(), + creation_time :: pv1_timestamp(), + all_members :: [pv1_server()], + down :: [pv1_server()], upi :: [pv1_server()], repairing :: [pv1_server()], dbg :: list(), %proplist(), is checksummed - dbg2 :: list() %proplist(), is not checksummed + dbg2 :: list(), %proplist(), is not checksummed + members_dict :: p_srvr_dict() }). -define(MACHI_DEFAULT_TCP_PORT, 50000). diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl index b3cbd44..d17ff4a 100644 --- a/src/machi_chain_manager1.erl +++ b/src/machi_chain_manager1.erl @@ -383,6 +383,9 @@ calc_projection(#ch_mgr{proj=LastProj, runenv=RunEnv} = S, %% OldThreshold: Percent chance of using the old/previous network partition list %% NoPartitionThreshold: If the network partition changes, what percent chance %% that there are no partitions at all? +%% AllHosed: FLUs that we must treat as if they are down, e.g., we are +%% in a flapping situation and wish to ignore FLUs that we +%% believe are bad-behaving causes of our flapping. 
calc_projection(_OldThreshold, _NoPartitionThreshold, LastProj, RelativeToServer, AllHosed, Dbg, @@ -415,8 +418,8 @@ D_foo=[], %% a real repair process cannot take place until the chain is %% stable, i.e. everyone is in the same epoch. - %% TODO create a real API call for fetching this info. - SameEpoch_p = check_latest_private_projections( + %% TODO create a real API call for fetching this info? + SameEpoch_p = check_latest_private_projections_same_epoch( tl(NewUPI_list) ++ Repairing_list2, S#ch_mgr.proj, Partitions, S), if not SameEpoch_p -> @@ -453,7 +456,7 @@ D_foo=[], Dbg ++ [{ps, Partitions},{nodes_up, Up}]), {P, S#ch_mgr{runenv=RunEnv3}}. -check_latest_private_projections(FLUs, MyProj, Partitions, S) -> +check_latest_private_projections_same_epoch(FLUs, MyProj, Partitions, S) -> FoldFun = fun(_FLU, false) -> false; (FLU, true) -> @@ -462,14 +465,6 @@ check_latest_private_projections(FLUs, MyProj, Partitions, S) -> end, case perhaps_call_t(S, Partitions, FLU, F) of {ok, RemotePrivateProj} -> - %% TODO: For use inside the simulator, this - %% function needs to check if RemotePrivateProj - %% contains a nested inner projection and, if - %% so, compare epoch# and upi & repairing lists. - %% If the nested inner proj is not checked here, - %% then a FLU in asymmetric partition flapping - %% case will appear in the simulator to be stuck - %% in repairing state. if MyProj#projection_v1.epoch_number == RemotePrivateProj#projection_v1.epoch_number andalso @@ -584,8 +579,7 @@ react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, #ch_mgr{name=MyName, proj=P_current, flap_limit=FlapLimit} = S) -> ?REACT(a30), - RelativeToServer = MyName, - {P_newprop1, S2} = calc_projection(S, RelativeToServer), + {P_newprop1, S2} = calc_projection(S, MyName), ?REACT({a30, ?LINE, [{newprop1, machi_projection:make_summary(P_newprop1)}]}), %% Are we flapping yet? @@ -600,7 +594,6 @@ react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, {P_newprop10, S10} = case get_flap_count(P_newprop3) of - %% TODO: refactor to eliminate cut-and-paste code in 'when' {_, P_newprop3_flap_count} when P_newprop3_flap_count >= FlapLimit -> AllHosed = get_all_hosed(S3), {P_i, S_i} = calc_projection(S3, MyName, AllHosed), @@ -608,37 +601,40 @@ react_to_env_A30(Retries, P_latest, LatestUnanimousP, _ReadExtra, false -> P_i; true -> - P_i#projection_v1{upi=[MyName], - repairing=[], - down=P_i#projection_v1.all_members - -- [MyName]} + P_i#projection_v1{ + upi=[MyName], + repairing=[], + down=P_i#projection_v1.all_members + -- [MyName]} end, - %% TODO FIXME A naive assignment here will cause epoch # - %% instability of the inner projection. We need a stable - %% epoch number somehow. ^_^ - %% P_inner2 = P_inner#projection_v1{epoch_number=P_newprop3#projection_v1.epoch_number}, - FinalInnerEpoch = - case proplists:get_value(inner_projection, - P_current#projection_v1.dbg) of - undefined -> + case inner_projection_exists(P_current) of + false -> AllFlapCounts_epk = [Epk || {{Epk,_FlTime}, _FlCount} <- get_all_flap_counts(P_newprop3)], case AllFlapCounts_epk of [] -> + %% HRM, distrust?... 
+ %% P_newprop3#projection_v1.epoch_number; P_newprop3#projection_v1.epoch_number; [_|_] -> lists:max(AllFlapCounts_epk) end; - P_oldinner -> - if P_oldinner#projection_v1.upi == P_inner#projection_v1.upi + true -> + P_oldinner = inner_projection_or_self(P_current), + if P_oldinner#projection_v1.upi == + P_inner#projection_v1.upi andalso - P_oldinner#projection_v1.repairing == P_inner#projection_v1.repairing + P_oldinner#projection_v1.repairing == + P_inner#projection_v1.repairing andalso - P_oldinner#projection_v1.down == P_inner#projection_v1.down -> - P_oldinner#projection_v1.epoch_number; + P_oldinner#projection_v1.down == + P_inner#projection_v1.down -> + %% HRM, distrust?... + %% P_oldinner#projection_v1.epoch_number; + P_oldinner#projection_v1.epoch_number + 1; true -> P_oldinner#projection_v1.epoch_number + 1 end @@ -666,6 +662,7 @@ react_to_env_A40(Retries, P_newprop, P_latest, LatestUnanimousP, P_newprop#projection_v1.down), if + %% Epoch == 0 is reserved for first-time, just booting conditions. (P_current#projection_v1.epoch_number > 0 andalso P_latest#projection_v1.epoch_number > P_current#projection_v1.epoch_number) @@ -771,18 +768,20 @@ react_to_env_A40(Retries, P_newprop, P_latest, LatestUnanimousP, true -> ?REACT({a40, ?LINE, [true]}), - react_to_env_A50(P_latest, S) + FinalProps = [{throttle_seconds, 0}], + react_to_env_A50(P_latest, FinalProps, S) end. -react_to_env_A50(P_latest, S) -> +react_to_env_A50(P_latest, FinalProps, S) -> ?REACT(a50), - HH = get(react), - io:format(user, "HEE50s ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse([X || X <- HH, is_atom(X)])]), - %% io:format(user, "HEE50 ~w ~w ~p\n", [S#ch_mgr.name, self(), lists:reverse(HH)]), + _HH = get(react), +% io:format(user, "HEE50s ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse([X || X <- _HH, is_atom(X)])]), + %% io:format(user, "HEE50 ~w ~w ~p\n", [S#ch_mgr.name, self(), lists:reverse(_HH)]), - ?REACT({a50, ?LINE, [{latest_epoch, P_latest#projection_v1.epoch_number}]}), - {{no_change, P_latest#projection_v1.epoch_number}, S}. + ?REACT({a50, ?LINE, [{latest_epoch, P_latest#projection_v1.epoch_number}, + {final_props, FinalProps}]}), + {{no_change, FinalProps, P_latest#projection_v1.epoch_number}, S}. react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, Rank_newprop, Rank_latest, @@ -790,8 +789,10 @@ react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, ?REACT(b10), {_P_newprop_flap_time, P_newprop_flap_count} = get_flap_count(P_newprop), - LatestAllFlapCounts = get_all_flap_counts_counts(P_latest), - P_latest_trans_flap_count = my_find_minmost(LatestAllFlapCounts), + _LatestAllFlapCounts = get_all_flap_counts_counts(P_latest), + %% Transitive flap counts are buggy: the current method to observe + %% them is **buggy**. + %% P_latest_trans_flap_count = my_find_minmost(LatestAllFlapCounts), if LatestUnanimousP -> @@ -804,12 +805,14 @@ react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, %% I am flapping ... what else do I do? 
?REACT({b10, ?LINE, [i_am_flapping, {newprop_flap_count, P_newprop_flap_count}, - {latest_trans_flap_count, P_latest_trans_flap_count}, + %% {latest_trans_flap_count, P_latest_trans_flap_count}, {flap_limit, FlapLimit}]}), _B10Hack = get(b10_hack), %% if _B10Hack == false andalso P_newprop_flap_count - FlapLimit - 3 =< 0 -> io:format(user, "{FLAP: ~w flaps ~w}!\n", [S#ch_mgr.name, P_newprop_flap_count]), put(b10_hack, true); true -> ok end, io:format(user, "{FLAP: ~w flaps ~w}!\n", [S#ch_mgr.name, P_newprop_flap_count]), +%io:format(user, "FLAP: ~w flapz ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse([X || X <- get(react), is_atom(X)])]), + if %% So, if we noticed a flap count by some FLU X with a %% count below FlapLimit, then X crashes so that X's @@ -818,39 +821,53 @@ react_to_env_B10(Retries, P_newprop, P_latest, LatestUnanimousP, %% detected by our own failure detector and get us out of %% this current flapping situation, right? TODO %% - %% 2015-04-05: If we add 'orelse AllSettled' to this 'if' - %% clause, then we can end up short-circuiting too - %% early. (Where AllSettled comes from the runenv's - %% flapping_i prop.) So, I believe that we need to - %% rely on the failure detector to rescue us. + %% 2015-04-10: TODO Flap count detection, as it has + %% been attempted before now, is buggy. %% - %% TODO About the above ^^ I think that was based on buggy - %% calculation of AllSettled. Recheck! + %% MEANWHILE, we have learned some things about this + %% algorithm in the past few months. With the introduction + %% of the "inner projection" concept, we know that the inner + %% projection may be stable but the "outer" projection will + %% continue to be flappy for as long as there's an + %% asymmetric network partition somewhere. We now know that + %% that flappiness is OK and that the only problem with it + %% is that it needs to be slowed down so that we don't have + %% zillions of public projection proposals written every + %% second. %% - %% TODO Yay, another magic constant below, added to - %% FlapLimit, that needs thorough examination and - %% hopefully elimination. I'm adding it to try to - %% make it more likely that someone's private proj - %% will include all_flap_counts_settled,true 100% - %% of the time. But I'm not sure how important that - %% really is. - %% That settled flag can lag behind after a change in - %% network conditions, so I'm not sure how big its - %% value is, if any. -% QQQ TODO -% P_latest_trans_flap_count >= FlapLimit + 20 -> -% %% Everyone that's flapping together now has flap_count -% %% that's larger than the limit. So it's safe and good -% %% to stop here, so we can break the cycle of flapping. -% ?REACT({b10, ?LINE, [flap_stop]}), -% react_to_env_A50(P_latest, S); + %% It doesn't matter if the FlapLimit count mechanism + %% doesn't give an accurate sense of global flapping state. + %% FlapLimit is enough to be able to tell us to slow down. - true -> - %% It is our moral imperative to write so that the flap - %% cycle continues enough times so that everyone notices - %% and thus the earlier clause above fires. - ?REACT({b10, ?LINE, [flap_continue]}), - react_to_env_C300(P_newprop, P_latest, S) + true -> %% P_latest_trans_flap_count >= FlapLimit -> + %% We already know that I'm flapping. We need to + %% signal to the rest of the world that I'm writing + %% and flapping and churning, so we cannot always + %% go to A50 from here. + %% + %% If we do go to A50, then recommend that we poll less + %% frequently. 
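            %% Worked example (illustrative numbers only, not from this
            %% patch): with FlapLimit = 3 the clause below picks
            %% ThrottleTime = 1 second, and since X is uniform on 1..100,
            %% roughly 80% of passes take the A50 "no change" path instead
            %% of writing yet another public projection; the remaining ~20%
            %% still fall through to C300 so the flap cycle stays visible
            %% to the other chain managers.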
+ {X, S2} = gimme_random_uniform(100, S), + if X < 80 -> + ?REACT({b10, ?LINE, [flap_stop]}), + ThrottleTime = if FlapLimit < 500 -> 1; + FlapLimit < 1000 -> 5; + FlapLimit < 5000 -> 10; + true -> 30 + end, + FinalProps = [{my_flap_limit, FlapLimit}, + {throttle_seconds, ThrottleTime}], +io:format(user, "<--x=~w-.--~w-~w-~w->", [X, MyName, P_newprop_flap_count,FlapLimit]), + react_to_env_A50(P_latest, FinalProps, S2); + true -> + %% It is our moral imperative to write so that + %% the flap cycle continues enough times so that + %% everyone notices then eventually falls into + %% consensus. + ?REACT({b10, ?LINE, [flap_continue]}), +io:format(user, "<--x=~w-oooo-~w-~w-~w->", [X, MyName, P_newprop_flap_count,FlapLimit]), + react_to_env_C300(P_newprop, P_latest, S2) + end end; Retries > 2 -> @@ -943,10 +960,11 @@ react_to_env_C110(P_latest, #ch_mgr{name=MyName} = S) -> {_,_,C} = os:timestamp(), MSec = trunc(C / 1000), {HH,MM,SS} = time(), - case proplists:get_value(inner_projection, P_latest2#projection_v1.dbg) of - undefined -> + case inner_projection_exists(P_latest2) of + false -> ok; - P_inner when is_record(P_inner, projection_v1) -> + true -> + P_inner = inner_projection_or_self(P_latest2), io:format(user, "\n~2..0w:~2..0w:~2..0w.~3..0w ~p uses INNER: ~w\n", [HH,MM,SS,MSec, S#ch_mgr.name, machi_projection:make_summary(P_inner)]) @@ -957,9 +975,9 @@ react_to_env_C110(P_latest, #ch_mgr{name=MyName} = S) -> _ -> ok end, - react_to_env_C120(P_latest, S). + react_to_env_C120(P_latest, [], S). -react_to_env_C120(P_latest, #ch_mgr{proj_history=H} = S) -> +react_to_env_C120(P_latest, FinalProps, #ch_mgr{proj_history=H} = S) -> ?REACT(c120), H2 = queue:in(P_latest, H), H3 = case queue:len(H2) of @@ -979,7 +997,7 @@ react_to_env_C120(P_latest, #ch_mgr{proj_history=H} = S) -> %% io:format(user, "HEE120 ~w ~w ~p\n", [S#ch_mgr.name, self(), lists:reverse(HH)]), ?REACT({c120, [{latest, machi_projection:make_summary(P_latest)}]}), - {{now_using, P_latest#projection_v1.epoch_number}, + {{now_using, FinalProps, P_latest#projection_v1.epoch_number}, S#ch_mgr{proj=P_latest, proj_history=H3}}. react_to_env_C200(Retries, P_latest, S) -> @@ -1384,8 +1402,20 @@ projection_transition_is_sane( io:format(user, "|~p,~p TODO revisit|", [?MODULE, ?LINE]), ok; - not RetrospectiveP -> - exit({upi_2_suffix_error}) + true -> + %% The following is OK: We're shifting from a + %% normal projection to an inner one. The old + %% normal has a UPI that has nothing to do with + %% RelativeToServer a.k.a. me. + %% from: + %% {epoch,847},{author,c},{upi,[c]},{repair,[]},{down,[a,b,d]}, + %% to: + %% {epoch,848},{author,a},{upi,[a]},{repair,[]},{down,[b,c,d]}, + if UPI_2_suffix == [AuthorServer2] -> + true; + not RetrospectiveP -> + exit({upi_2_suffix_error, UPI_2_suffix}) + end end end end, @@ -1492,6 +1522,29 @@ merge_flap_counts([FlapCount|Rest], D1) -> proxy_pid(Name, #ch_mgr{proxies_dict=ProxiesDict}) -> orddict:fetch(Name, ProxiesDict). +gimme_random_uniform(N, S) -> + RunEnv1 = S#ch_mgr.runenv, + Seed1 = proplists:get_value(seed, RunEnv1), + {X, Seed2} = random:uniform_s(N, Seed1), + RunEnv2 = [{seed, Seed2}|lists:keydelete(seed, 1, RunEnv1)], + {X, S#ch_mgr{runenv=RunEnv2}}. + +inner_projection_exists(P) -> + case proplists:get_value(inner_projection, P#projection_v1.dbg) of + undefined -> + false; + _ -> + true + end. + +inner_projection_or_self(P) -> + case proplists:get_value(inner_projection, P#projection_v1.dbg) of + undefined -> + P; + P_inner -> + P_inner + end. 
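%% Sketch only, not part of this patch: the writer-side counterpart that the
%% two accessors above assume, i.e., stashing an inner projection into the
%% outer projection's dbg proplist so that inner_projection_exists/1 and
%% inner_projection_or_self/1 can find it later. The name
%% store_inner_projection/2 is hypothetical. Note that dbg is part of the
%% checksummed portion of #projection_v1, so any such stash would have to
%% happen before the epoch checksum is computed.
store_inner_projection(P_outer, P_inner)
  when is_record(P_inner, projection_v1) ->
    Dbg = lists:keydelete(inner_projection, 1, P_outer#projection_v1.dbg),
    P_outer#projection_v1{dbg=[{inner_projection, P_inner}|Dbg]}.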
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% perhaps_call_t(S, Partitions, FLU, DoIt) -> diff --git a/src/machi_projection_store.erl b/src/machi_projection_store.erl index f4b9074..4a68aa1 100644 --- a/src/machi_projection_store.erl +++ b/src/machi_projection_store.erl @@ -262,11 +262,11 @@ do_proj_write(ProjType, #projection_v1{epoch_number=Epoch}=Proj, S) -> EpochT = {Epoch, Proj#projection_v1.epoch_csum}, NewS = if ProjType == public, Epoch > element(1, S#state.max_public_epoch) -> - io:format(user, "TODO: tell ~p we are wedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), + %io:format(user, "TODO: tell ~p we are wedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), S#state{max_public_epoch=EpochT, wedged=true}; ProjType == private, Epoch > element(1, S#state.max_private_epoch) -> - io:format(user, "TODO: tell ~p we are unwedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), + %io:format(user, "TODO: tell ~p we are unwedged by epoch ~p\n", [S#state.wedge_notify_pid, Epoch]), S#state{max_private_epoch=EpochT, wedged=false}; true -> S diff --git a/test/machi_chain_manager1_converge_demo.erl b/test/machi_chain_manager1_converge_demo.erl index cb9176d..1d2c537 100644 --- a/test/machi_chain_manager1_converge_demo.erl +++ b/test/machi_chain_manager1_converge_demo.erl @@ -237,8 +237,12 @@ convergence_demo_testfun(NumFLUs) -> io:format(user, "\nLet loose the dogs of war!\n", []), DoIt(30, 0, 0), [begin + %% io:format(user, "\nSET partitions = ~w.\n", [ [] ]),machi_partition_simulator:no_partitions(), + %% [DoIt(50, 10, 100) || _ <- [1,2,3]], + io:format(user, "\nLet loose the dogs of war!\n", []), + DoIt(30, 0, 0), io:format(user, "\nSET partitions = ~w.\n", [ [] ]),machi_partition_simulator:no_partitions(), - [DoIt(50, 10, 100) || _ <- [1,2,3]], + [DoIt(10, 10, 100) || _ <- [1]], %% machi_partition_simulator:reset_thresholds(10, 50), %% io:format(user, "\nLet loose the dogs of war!\n", []), @@ -251,8 +255,8 @@ convergence_demo_testfun(NumFLUs) -> [begin {ok, PPPallPubs} = ?FLU_PC:list_all_projections(FLU,public), [begin - {ok, Pr} = ?FLU_PC:read_projection(FLU, - public, PPPepoch), + {ok, Pr} = todo_why_does_this_crash_sometimes( + FLUName, FLU, PPPepoch), {Pr#projection_v1.epoch_number, FLUName, Pr} end || PPPepoch <- PPPallPubs] end || {FLUName, FLU} <- Namez], @@ -269,10 +273,12 @@ convergence_demo_testfun(NumFLUs) -> %% %% [{a,b},{b,d},{c,b}, {b,a},{a,b},{b,c},{c,b},{b,d},{d,b}], %% [{a,b},{b,d},{c,b}, {c,a},{a,c},{c,b},{b,c},{c,d},{d,c}], %% [{a,b},{b,d},{c,b}, {d,a},{a,d},{d,b},{b,d},{d,c},{c,d}] ] - %% end || Partition <- [ [{a,b}, {b,c}], - %% [{a,b}, {c,b}] ] + end || Partition <- [ [{a,b}, {b,c}], + [{a,b}, {c,b}] ] %% end || Partition <- [ [{a,b}, {b,c}] ] %% hosed-not-equal @ 3 FLUs - end || Partition <- [ [{a,b}, {b,a}] ] + %% end || Partition <- [ [{b,d}] ] + %% end || Partition <- [ [{a,b}, {b,a}] ] + %% end || Partition <- [ [{a,b}, {b,a}, {a,c},{c,a}] ] %% end || Partition <- [ [{a,b}], %% [{b,a}] ] %% end || Partition <- [ [{a,b}, {c,b}], @@ -359,6 +365,17 @@ convergence_demo_testfun(NumFLUs) -> ok = machi_partition_simulator:stop() end. +todo_why_does_this_crash_sometimes(FLUName, FLU, PPPepoch) -> + try + {ok, _}=Res = ?FLU_PC:read_projection(FLU, public, PPPepoch), + Res + catch _:_ -> + io:format(user, "QQQ Whoa, it crashed this time for ~p at epoch ~p\n", + [FLUName, PPPepoch]), + timer:sleep(1000), + ?FLU_PC:read_projection(FLU, public, PPPepoch) + end. 
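%% Hedged generalization of the retry-once workaround above (sketch only;
%% retry_call/3 is a hypothetical helper, not part of this patch): retry an
%% arbitrary thunk a bounded number of times, sleeping between attempts, and
%% let the final attempt crash normally if it still fails.
retry_call(Fun, _SleepMs, 0) ->
    Fun();
retry_call(Fun, SleepMs, Retries) when Retries > 0 ->
    try
        Fun()
    catch _Class:_Reason ->
        timer:sleep(SleepMs),
        retry_call(Fun, SleepMs, Retries - 1)
    end.

%% Example use, equivalent in spirit to todo_why_does_this_crash_sometimes/3:
%%   retry_call(fun() -> {ok,_} = ?FLU_PC:read_projection(FLU, public, Epoch) end,
%%              1000, 1).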
+ private_projections_are_stable(Namez, PollFunc) -> Private1 = [?FLU_PC:get_latest_epoch(FLU, private) || {_Name, FLU} <- Namez], diff --git a/test/machi_chain_manager1_test.erl b/test/machi_chain_manager1_test.erl index 7152d40..89f586a 100644 --- a/test/machi_chain_manager1_test.erl +++ b/test/machi_chain_manager1_test.erl @@ -230,8 +230,8 @@ nonunanimous_setup_and_fix_test() -> %% we expect nothing to change when called again. {not_unanimous,_,_}=_YY = ?MGR:test_read_latest_public_projection(Ma, true), - {now_using, EpochNum_a} = ?MGR:test_react_to_env(Ma), - {no_change, EpochNum_a} = ?MGR:test_react_to_env(Ma), + {now_using, _, EpochNum_a} = ?MGR:test_react_to_env(Ma), + {no_change, _, EpochNum_a} = ?MGR:test_react_to_env(Ma), {unanimous,P2,_E2} = ?MGR:test_read_latest_public_projection(Ma, false), {ok, P2pa} = ?FLU_PC:read_latest_projection(Proxy_a, private), P2 = P2pa#projection_v1{dbg2=[]}, @@ -242,7 +242,7 @@ nonunanimous_setup_and_fix_test() -> %% Poke FLUb to react ... should be using the same private proj %% as FLUa. - {now_using, EpochNum_a} = ?MGR:test_react_to_env(Mb), + {now_using, _, EpochNum_a} = ?MGR:test_react_to_env(Mb), {ok, P2pb} = ?FLU_PC:read_latest_projection(Proxy_b, private), P2 = P2pb#projection_v1{dbg2=[]}, diff --git a/test/machi_partition_simulator.erl b/test/machi_partition_simulator.erl index c7a2f1e..fbbdcb9 100644 --- a/test/machi_partition_simulator.erl +++ b/test/machi_partition_simulator.erl @@ -198,7 +198,6 @@ partitions2num_islands(Members0, Partition0) -> %% Ignore duplicates in either arg, if any. Members = lists:usort(Members0), Partition = lists:usort(Partition0), -io:format(user, "\npartitions2num_islands(Members, Partition)\n~p ~p\n", [Members, Partition]), Connections = partition2connection(Members, Partition), Cs = [lists:member({X,Y}, Connections) From 67e5795edde66c0a2178c6a8e8910e05b114da95 Mon Sep 17 00:00:00 2001 From: Scott Lystig Fritchie Date: Fri, 10 Apr 2015 22:01:12 +0900 Subject: [PATCH 21/22] Update rebar --- rebar | Bin 193108 -> 192951 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/rebar b/rebar index 03c9be6c6ac439422dae9343a4648853abf00c34..146c9aa5a5468b184ddff62ae377f39f7e0dd951 100755 GIT binary patch delta 67784 zcmY(KLtvPV)~#bZjcqr!8{0M-+kS%vjcwbuZ98df+q~a@W_PsKY!5fqvlkEGTlV2& ziQ*Uu!96s>;}l8$CoYu#Cv{oRV2J;zIKtJTH9!agvXPJnP65OpvAK*lqH8asNzx=L z^4Ka<>lwC(6gB>a)|w~JDh^;4cBh?Y+&IyV%US3i%*t&%>q9EKeC<<5y+Qpz~^?6?)`UF>vU%`g7`R*ol3pMde zLRocgUikHlTsA^)3e>FcH)xz3ILjOOYiQgO_7u-wp?ai0uM-e%9U;HaZW6YvV62TD zRk?JK>iW#8$E=(g=7z-dJK4bD_w;1^J4d3;G!h`ePX#>f8Nj%S8`Cvrp4+;#KMoyv zUF<-S+O7DW{EGwK7h6e9nF(_u1_vfqg6Vib;fS#U0}H#BugZ4GtU57#ivCS#oIvEZ z@d<=(77U+ta9@Dc7|yq-AA;k-I)c(1n%` zY`BZ^29Pz7_J*FgG`U`k(WxWq$4KgHnQC?lV*bxoBz_#;a=xvsvpdLdY`M?pVE8&^C6S>$vkpz#F)RwRYhyf1#mGPx$ijv{bz{ve#B1EEI)qZBg) z-3DeqjYe&EWK;A@@0Qj!wxn~a*0*+ciVh^QzQS%+gf_~1ZN?xF;*#Z>VEwi%2Z;MO z1yTvLTR2BMUWCa1u#Q5L@BV59EbT8xyul_>pgAi{MDT7lyquo#`4V?znIXEih$zU z+PvUgs62*XShYD4zWPeQIXqaUY)rdj<;oJ}u358Ezt^# zeEJZxFsF4Ptau{?bG10q682e-9zb^eIpWZWSqDMXq_D3y&g*B$0d~{L$~=_(l!KSD zNv<;DpjjPit3^IVLC_=xj)^#Q!bIwQ0`H{OQFQBFSouEc)iaq@4{5Zi%fa!Hf@3;Q zvFHsB!L)8kZT}*Ps0Ry=`a$(-`^;h;d%ykZD(3~FiDqKJVWgL1gSOZs3!sFM27Q+Q zXdIDV+zBGa2Nr=4tr}r&IES^vHrKdX&Y{R%P>Rih9zqfW5mY_!L>z6()%2%}K;U^I z$C#I;md$Z^-UvZMi7ZTAMWQ?DsFn<&Fzf^kTuyg{4U>zsZ;DzQs(|XAjn*nK+#&|d z92D~hnf;(^zG!45lDZdcHjv%7NXXKu>5oO%XL2X0J=KK69dcmDiUw3BA8M9E4@R7! zjx;GX{tOAHL?JkH_`#5O0P-6@TH|=xa5WrNXCxlNcyP)CEMUN&zJ03V+C#vR{q z*jWMf5g}rZnHKC344tHx^bB&BU8!rV6B|P$cT1=@(^qs6kL!^w0C}c! 
zRzKmk#>qS{D4Hl#txujXIOO6htx0}rSQN6E?d3wMJY5kVLN6td7N_Ub)gi4F#l6U9vhSO zhXLBVGAjI5q|XC2ZHG9B>eNs=<;tl3N)(kM=qF}BpJth)IKWWUZ)S)ats*~ZtSJIKtVwzfNbm%6mW$Hi9F-*R z0I8M;TbRs_3qY2OEgS}!JCs&xI`0KBX<@9E`G^3b0KLZF#-}XZdqB(1j$g47DhEm( z46Ab2hAo+)9`esP8cIvc!T3)z!lRQ+ecd5h-iezV7_I=(9hA30Ke;U^Wm63N0xnMK zqCsQS>j;9(u_T$vBEChVWKe$&_GV=aR^K|SYY1S6s+%sB`l z4G2{KkhCe&ekP%}17=ziLJ`(hw@9wg!VkKTbs*a+-zl#UJvY0O*n_n98hIi%S&=lQ zd9WW$hNc#~u=(GZ5@tU$As3|-e)}y6eS(N4CCJ2&wBNsACR8W$={Ue*!ivkZ%Kv~% z^aiNj^oz!`g72Lwri05aTCiZ$vxVY{-Z_6$4BQsxFzu;CELm<7`N z{Q!o9CAnzGtzcjcQ-iNX(~BYyBGqgmYeu^db|9`4+jfZgj{lyZ-dMuEuo;{Zwkva! znVZmQTH)Kx@849ZfcUnD2|<#kgG&i2vLMlClXKR!t+rv7$3hq*u_p@!lcqx#A@DnH zDI-RqEt1bxLD2Sp)doOhUZIsFnGn+y2LYAX8&O;h8(9-|GmASpQah}?#xp|7k%!oS ziPajxEkJzm4ToNt^+n}te?yVqLHO98nz77ps)3H8Nhl5NmfI zz?;4yQ= z99)Nkpw0PTTn;Zr%ODORv((8yX-z8I|1O8Us?*^bF0ECWl9-5rkNu)!iGKk&h2IFs zKPjbqf{M0D$iaK0B7OQ)!z@rso&3jqpxAT%(wp33Kr5_*MK}7-=OLJf42G8LB9tev zP`7+c7L`Y~i92@(J00&sXW~?cQOxVfl*_47lw}n1L8X8>S|A;gDpDxI94OZh{&Ip% zrd%e@m&jXpd^^W!&GwXxqeTI%3K2toQTB~uF!hFW5}$79#kiV+8PE?*5j>$sB5NJ# zl)^!c$e+cLT0uM(e{T|EhFNf@UVLVmwy(%_v1d69 z&AANq!CQA0Zh1FdteKs7ffQr* z3W5Una>TQkCjcN+20pHyy9(2Y$U~xd7ErJE)`7x4fR03sXt)uLa3+P8Ez?TtPpjd* zG9d4D5*+9?|BKy3k+hRnxSuq(mQ^`8$D|D9*%oA+^n2*fyu;3vzYFh`okzYBPb_`! z3PK?&^!14&@on-&oK;Swh&q^R-7gwAQF|`7!b<4~^i*|5WL*?C6pB zyHEJ%_`zWJ%su}Z^}tPK+A$Xr%zg%C;uEB6246qiDfz@%4EU+g^1VDaPjs8-Ct#SD zO@ZSi&BO#?e20#@JB<`ziKKxejP$e$o3w;Oj$WY;j^5|e*k#n#``Qb-|0E7t!qh$79zYM4%-xsPJ?V$)VDxGj) zRvH6X`i$_qmV@X0UYP^&46Dul_1lG&w>?WtM>HlV^JJhA>lFzeOn|7VI#&|;yzb5ypn@J~rXhZ4p8ZEqHjky}PctXflRRwrMvnz1>*At4d5VYAEOi%v$0-G#8tI$M= zD`g=|1ir6iN(~$xU-u8}Dn08<&&_m90%s-t&^4_#QMYs}yve}#l>7I~714K?_dzhS zo&2}R$WZWg*ImIOrl8l%WXZSY1+J#9gQ4sCvy2;2`X)zVy3KV3ErHF(&1={a9dN}k zz9BFj%T3H-y~DfEgTE4g#79`_!xZtouvO2v`XbPcJ*j(A7!^(n*3ScQr*JwGc>U`1om}?*Huk;W@frPRI4U?wk-p zkZI7);`20H8uS#v_`P-`|J;C6_bN9PN^kVS^tD)M zP4SgHX;Nj$*wSvLf{VNyY1XXYUHj-UaZBf(AES3VZ9t?dZ}v!KkH3-127E5Beq=yi z8|V-^pI|%hjFhqtu*P)0J*9OMb)K|(UhS+$T+6!-ZCQMo=--_cFm4I77Z__O*R(ig z2I;Rf{=`vA{}}sxSnj~;p{t%`d479)jrn#xo_@{n9O4xu8SnO-dtW_N`8^S#Pu+zF zw9T~T*G)a$vtK{=uo_+fz_-!!`R>yu<)YimS<*mV!_!HOkLKOmCMOX;+gI2>E~L~g@3-cgXF*1aMOMZ}C8v7rX=3+IW+mIR)>QL=ii+Wy>ZgSk|7Qs#!@^S~UtRll)`(f=zt+!U zGYhXbz4zq~OZQ9Y@Soo|Lrv~T@~KH{9-k5X+=C@}NY^&JJm;TaSYE}4Dvx$G??*^8 z_e++~U#5P_UFVy7KnOJE(^egk4ARBxx*xIkdnM!6&uH$iz8P0$h4|w1%rv2AO$DlyIG~@bd zWH)ik89np_z-99Y?pmOFUr&TUZ`!u#cdR@NCO!N8jObKUY0CX;|s$YCbXkK7w9gTS*@zO0nhfcosfY;cOeY1b*6f zu0374sD)b%B%MSJl^pqrG(sR#(m-CLAP0z@skKpC=<6?UPzw>+7 zs0<%_{g))m7!TLWt@;KDDj)R??CzH$=jyjXvhB*IOK(n!vfFy>)6PO7p365fBhKfy zcY|)X4w5G)F~@M{l_8PxLz(4MP9LE8N%bA5-%U^eo_%Lf8?q6uF&ky@UXClB*E4d`}1zWD2| zir&{g(g=>eQ9(dJzQ5JtVKrq9h(_$`LX}HznwhJh8CVf;Ej2bp<>Vrs(vH*@WD@|h zJY{VuX=z&2BC&`Ys{9|nQ zGpdrQ$GDuw%mVuR+>EAS)LNFDI1_={P3AjY<{mL8gO2+{%8Fgo6b>}os(U8iLVd_*L=7;@RPac$ z)245g_R=>h=M@ZhyEp#uSI%RckTGSJi?J@yW%${%bmb5qRZ;M>`L zkE&QfX=-_${adqTE)ZvA{Q!`4ts&QWEx+M#MSJ;hD`(VHt&qB-Dd%StFjHB>=68F0 zuiNdc$>C*Z=TJT&qQeqcJGLxky_eFfps`7US5eubiYD@U1|A!y*w z?T>;53v_1WO)InhEI^8b;%M`1z0i1MBm(}SY?~lzZZVzF&es-WD;iZ{D&GBNasok6 zNw62^gPZ$Xe~eA1!aKb0tg5@od3R?rP=LEYBD_jsTm>PzV|kP z*G{L|l|sdqu3JY(5p+3YuNtkrPUeEXIuw}Df zPO0?J{EhL;-&&tZSO65pK2#x-e^zpi$3rlgh|jDPPaM5=bSuA^{f^`2iew)#)7Of7 z@yO0^j$f+Db43>W(h6`6f9A~6y5dwKh~?P*fk#Kl_#jPhd4m1=dn;JyP#mou-V9fKJlU^!J zBKGfPv>VTn7~OF1g)VNQupzoSS!D$;0o-YtG+U2W=YQHQFI=q+n(~0FpG)3&Jsa)p zFB*~v?`Df9A9+t=3d=yz0 zW}NucS!zHVd4phvRSVgNAXS!1I>kfzKh1BCK6;Yi#po&!Cs z+%PlMUF@v#HjFv9BL_G>`!DNVdh+R72xycqe4RK+|CwRS`GpD5(R$Rj%U@sDkbnUO ztc&Jap2lc1gGP;$g&Ma54a+>?Vd$=R{d%{hesJ1`)+L#orQIFhB7I{3D_Ty(*AH}S@L0D+= 
zc*p*CIIKq;jPV4T^f&(?#x+jQ1j@nnT#U1>*DJg~WG*P3A@oH6x&xG5FTrd8^bG{U z4rHe=2n-m#e*hB-5|=-`u|G;!9})`uo-o)dq;el&ENp$RUo1RQphzwZKGe_-H8tpn zFby>{MlPIcP(m(FqrclM6hP=Nk&DF#9<&4L0jVFvX^0RRguH{d7(77?i~Ebr0zpFv z-2!SX7EVJ1z7@&NKY12vy+3Le(k}377TimO=mZ%j7FH`3k&FcEE*B--kWV&nj0v*1 zf71bZ(tpiCkPpOp*XRZ1saJm$8F~ldBX~R?j#?;b2U!(^U4-U@N5}=S9keKqqm=b5 zd|nB85&iMkry{@xgD9-z0Po`8u|rr2>ALfS3voK&eg`c71JL*)Qj`IQ`0?L_0|WA3 zoZaw`~g|BJjg4zp##P(VQZaT9JOS%84SpMTh6 z_!NIU-bwkB53mi9fl3!r%g6+hKNJ!RBS*wT2KL4U!hkU$#ftJa<48rHO0d+UceLTO z6i87bD`Z8dbGg2ys#ftXxW&t36rGx~>}yM9O+Q>p5ghM&%-bVN;CVG>z6N}|eXsbv ze}7*Em_Q>vO3shp4-|;JhXS-i+)~Qjv{Lh^RW|ZP6J=$Mzl&Lh+LV9VCnq^I%_?Tv zvP`_X7!8}Gy`G9oL{6qAEvD@yE~T=Ul(RM#XE07v=MMW!murUq7T7CGte~W1U8+yw zwVqdQ({!RNn-$EM)|mRuzGOv_(xP4L9BgBSn_acI=rhh|&_?CFJ_$?~-&t0~r_-4c zw5SXjXa6u)NwT7`|H(RWXxui;$@Aev#~B)0#9la^v``eUWtS;`%&9Y=gw2HASmFfy1gsQ(PXbOYysywIoGt{`amLHQKF`1qE_rImwERq(arLY= z$8oqhL5cIwukys|Ogo^lb}`)TSTqKpVKeF>(XdJXnS4Btp-|1F_b`=iM#B!C!1{&D zl4fkg!?_RQrhk*|eCm|P%&$hTk<6{M9B3vZPE?kzw!oS-eaH_X{6DR1$`_&bw1P`vMEROGLj+Qsd?VXnl7QkXy~;%JEf87tt4?| zDgNE3!XVtxR=<*^6QWm1svDr+AXIkcskZVT03p$Fmo`gN&`~mRVwGKu$&!T6L6M-K z!Ek?7&mVhvstj~kk?ezNE9$Sfrl_7?Rj|&qEAnEOjkRzoe7fGtshrZYRB$K}VKqpf zIzv{1nMM3QnqHY44A)YLPdAY0Dp$Uof#OHjrmAROpKZ^cXwSLFL`DwrARlca*I>(5 z^cqw`g3%fYL8&i^zr6#n^& zh1sgSi<^(Pn>=g}@v8768{q~!Fv{tYX~y&tvZl{dbzeF*hWhDuO`JdDI~=|+H%&jH z*h|y4bvOqI}|W$!PHTz9~P#)Uhcwy$l7&k`dJhZ5*nt5vB_ISbr#5lN24>lOx5E_7eaz!H22GrkVkQpRCx%V`kxK;Ju;3 zT55GCmjPkuAoj~|<;Bv5>1n~uFQ}{=X_*#%DF?;7I?@5vPspbGX5mOpW>qSnV8Gs( zp%+pfv_=SSVB#6K3RaGiVWUOcu~ofi(35;P2$c|iy^sJ*6KT$-9_(s`+!Spmt}s;| z$`ha;Yr{3CWnKQ zkB3zLt3a-88s})1p9{s1!c$)~q|zyQyd(alf3378RS3xRQVQQZ=lKr?dgzYW%vKn9 z#|E3$FZ;jc_AfVqTf5maiex^wTjj+pDY`)2F(E9Wj87{q>D3{+Yi`$?`Vw%gQv!G- z4vQFDjP;XtcSL4|f!1xr5 zwmy!w=E^SLzZ4GVot+0N9xLZcrb2hl%ccFv#7C!yEGCXO6G^O6pLLIw#xrt_Ax)6u zI0d`gTy1}c(!{ASnzKE}F=NS^J-J0ca z48h0Uf)N`P&TO0mZJ9NDWH}gZ)BB(rg%KOzy9t*s&wqEVhLF70r{3gD}f zd4%ndFruBmQd3xpXqB=d3(UACX}Xv&Bt)P;XU$?hKW9K#Ma za9j20(0>0D*=P}SK6<}Sbzinf2*IExZ8VW-wxCGUh zYH3Qaje33iH^_});Gpg+%mx@|AoS;1Nt(0=>T5<|M4{=Kc4z?4%?Y74wknT<7P1gN zS~{|lNL$EW!7oT46k~3Mhy<7cS=>N)j5!?v)`COAz>>BqJnuPKJ=@ z@ng+Mx7=Y_KP$;gQ!(u}?b?jd|12p^*NCaXpK6FE^KR5(0Hn2+W_I33ls=SKp+ta)9r(b^afn>aW87aZkcY*4WaF8 zMOe5;_caoswUjACJ-|CDhnC28GH+q7 z6<*Ry<+*Pg8~-I@2Atl)xNkN<%DMMPDrv`6=^#FOn@- zn=Ay4(9!3}6%AvMYpEP&`F8Qym{pz7UhV+HNzai;E=CZ+lrePL6^W*hol-OxhBS5= zg>?<=xQCj`jF)D(_p-Vr|cjvq(az%H2 zsB_^S?B5E1f9vjbi-a?kqUqk|lp8E2X_M#Fqlc0ImCU{}j|exAAm7{6L>;jL^Qr(u zi5&TM<%#u`GBq~5%p;VZA|0M&yIw`7J{maE0zS#Yp1B=IEl#9x?;JwoD-)3;u%0%Z zDSRdZ4G{1AaW~yxDPyt7gQ!QLm4zmR{1m}U*LsZIiT3^IUCE-^mv>c}1URz7INrKpj!;p+-KB3ED7jN5i> zx3=kg?{(|NxRK<;rreakQq{3;>hd;a&g zW%==QCGSpSxo7*;%kW|hYq`_2{Lb%o*ZZ3S@2jEbWr}WGz;mC|0Knd8yx6w;7S1Y)4wf$kcdhLE0#=mxc2RWN+JpvKcx}T6MymCY{TWftx zZc#Ep1ZnI38|`OJD#n34;L}b*M`K@8X zaOMcfg0BO%M&nVqab=3#VqyqVlagl+XuYnFx~KT{_;udf&5GaNn$vw3h@=R)G94)J zyqdEe)t#5=0Ugch%Ei@|wN-o!lWb|bX}(RiU>1P)QtEx)$%|?8ioWWJ_qwvN={*c` z=<3v|I-Wa=wWFlH%BlF@Yt)y?)iRJ{!!hy7Dx#t;miR1C+0)!a^w|ZIj*2%YeO-b) zE6^TnR}_@EfXXB7KG)@vNmx8`LX)0v$s@+fCUM^g2+b%)WEUKt@_j=1W(r6#&dNMk zzTx|3`-KRIFfL%7!ap=TI6vG!{C?|rfPavAKtAO>Svfs_$asi3#d~0Xqy4mgqx?kr z{MjYnBhe%Go#_|+1^x;8De)%qM&^~!E%u$(EwWwoEwWwGlS>p*ixO0j{v z+;6+faJY-H8+}uLb5={JkNFg;)taVmkt9F6$wpiC27tU8{Qn0IW-TM||AG6lk`9H~ zNFX4&2|2nHfVBhOitE-;;9wU@inbGpG8&Dir6Q&g+yNS~SYTlxq!3CzAvt+?eWfJW zt)?=HFi1bxkKa0fZQ7OGpD#zmgI{q=iL@d&mIoNtM+9RIGP9)c(V!#I$TQ@ z{m!a-zHANZ`hc?C3i4WHQY@WTU6xsM}Xx$mEX=|EH z>kqA>mSsGBixlVY!HO-4w5xx>i%;cFI+CeWo>zG_rP5S2pDbCmNvEc9Yu*nz{Zwfr zW*su_0PY(#a02sIwC}x?YEJb_6_+2Cqsrv1uILUc>#~fBtPfrCR2MWJ%WJdOT#n(} 
zv?G-0G+a4ZCR#8eUDRflD#VRM?6t;zAZ7_*x8#`?=vNkLz=>tm0!5SU@{d#L`YdpV z@zxHr1C>L+maTUymm|{3JJhSr89e!Q42n2-fuXx{5Jy&nm>{Xh$&l=289epB9?g{} zO&h=M)i^Ke=7xWmR4Gaw!!OzP+MiU-t~{Lovt0>U%C%`6Yd5_$xBT}&d!Ad=qUohm zU1}~ZuOO_Fw^*JsZdfx!s%w>)NprZsvudXonUgJ(Wf{ddIWtY66Q$dvSmvtw_{cKf z0!Ub*%p0>Wt1NLol;_!3sMVAZkZUXxpVUJxhY%!82R{9>T(Z!j!MYc#!QjxVUW~N0 zS-#5IHR91kS#z!s<*0|(VLUz;BdF4&u{S^Di24_(W#U2Js>}$vw9_mrunn_@I{3D$ zSets@!v==TzG|angabP~;hD9+-W}9H3^+dJz_!79{z;J?TY9Y>q_d4Lq~PwsjA!q& zr6o*SfY5S=X9#blzAZ0n=TUI(T&{L3KH4Rh!)8~plh4375#C1E7zq zoffm{#3ke%Sj;C~D7?jA73PlpDxtimx%b?W`nY?UfxV>3ol0%ljBjOB#-cnA1kljA zxG5#S`r1Dtk8+r=KfcMP*(7Age8lDp|LmZlu~O5~z#jSLQ`3BznM$SVqS3mE4iTX* zI><^6ne^Z7?Zo3-EhIOIwO~G5?)ps$T0uAWzn~udhB#AzqemG{VDAx285XqfK0W(r zAK#ANK&QOcIuTOa3wrJ+IVc$V4cu>fsvTZiNQp6RKT6^P+dkV%r%9?`Q|gZsA03p! z2Au@7-+rQrf22k+!>EHrY^9|WeA8u?_X3aHGYSW)!Yb4!KGGF>Q%lSH(Z|Atcr;<= zjpF5V-TJq1P=EK1W73za_Xp))w8X0bzQY8%vog zSWe0+CsBGtPR5rcynN&JiScGhOk^~um>=3bl~Eu~5v3T0u4~M0n#}ET^HJEFwb%GR z+$XlO_V<1Q8TnxF2UaX_o2PMhR(2Q4>gEx&SEB44fqPdW85heR^~<1>dW46sVXONY ze^iYH*j7v4poa*#P3daO(Ijmlqt z!rtZP?{}BFp3Vq6iuKgxx}I`Ja4(q02sO+R9xtQ9eEqMjL$1L?mCrx}DA&%Fu0#hz zcS_g@;FOF?ew`$mX7l}tyCM5w67_?goARb`Cc14U{Iyn!5Uk8Q4oEe)7l2SYP*>v{ zPw-2iPCXr%>~<5Y$6Y>-yFw0+fstBT7f0S#)t~iYd3y9j`3y@|{cYWUt+Zu&9R1*3 z=&r_0EDLL#i}$kUckSosPxVd_Ec2r4$51wAsC|!QxFitsCJ@g)MkY#%wui*@Dt(0&fVJv*Z24JHi=YD}BX0zS6^d6mxWMQwK4btpDU&!Ov{>82hgGoP7%0(iX zRTRaCI&(02w_g1k(D?^A=#voe;WTR?Ygecx;P+Z#l#*T4n>InO*`lC|TuDh%93349 zGyl14N|KnJCNm|2WGJka3h`$P5)7>r3M1nPY8pXxfI5%w(<%Jv`% zJo@uJ5PlFHjA0k7PEpH`arDscuXGS4z6m>rc7PKQfU<@TI%eZT%<@YUiFfWoRDmRi{!YzFaa{J`7$h@12PA{4$dFf!UnBq75Y)vDGmuY_qYL4RWY2|RixAcg>uV0{hkZtNp{%XL zYXq=b77W`gh6Uqtj{Q(X&Tmf((rJ3_VK!0LBaJWHp z;DLiOzr2@run^^oJ{-wl75rK#`K-k-1Cabi(g*PW@mxA%+h#qPX!ftW zZ6JBA!g=+ROQ5*m2WLfw-GgM<|Fj_tmP!}Fs*t;+-4Y$%|zBK zD>kUo%eVBg#dV0>M>{~PVa!6+OZum=cDwy(|K2K?s>G(X71Dj3oYmtU+_p|ng{>{8 zYh4QL8t{0Ez8c|`J5hL;xXF2x9K=GMiIV?CTc@PRQtlt`TsqKVQM)*uk4V*CBCNi%vTP^wZ7pU)j1`Z0`#yWFA zQZc|0a*pv~^Z%hZ786ri?a+1jxAJ?4djkGR&$6pXS(2JzbJ$x|-V1qOjJD%IYbfV| zPznxKu;A7iP?AEz=<=?;)~aa8nnIRio2Hu%vsN^Z22J< zw4BC15yo=`(RD}ckBR;HZN48bl^i>X5HH19{ik76B0P-Sg>hx03;$d{+SUFQhj}95 zP?&k3{kY!UPkesy&$eAaa_3*enB4_@f1n{ge@`5tA%@7~x>nA@(R2D}UOQ7n)6M3N z(i8ucyjHel+pi)sY0(o|q8%q`nZhbQ1|_Lh0f&yV%^%WPQG;p_jO)SV+2F~$gnc<6 z;=VM6DuqH!LPrsw4P=z2?KdsEpBhLh^Pyfzv(h>ERgVH2lVGK$dO}N=~EO4CpQF8t*1$SroDv zvV5XW?O#7I)~edugE1vX75_sTe1KtR2OhGGnZ$3h@AV|qkr2NdYFffJ6OZq8we%&C z82&}spd#DI!1QRD&f{)E@ma_N=6xC&xf)~7vHO}S>z8gUN7%_rqewIFd8(e>-P-QS zWJ$#skg`AXuogZ(j6iV_%`RzB-9pn4#bOAbAFug833OX|F0EA@R49LBS zqJI_c&n#O-^F#gRebSPg-wjxc;W%8|Th+3Im-GC|#bt9zbCk2%=%JqVc1*C{UeF*2 zj^?;rM7$sEhvqc<*K=_$%@>O0Xtf|#3HqLi{NP1aSR6lVsv;V>0g>D734!Bw<@kju z`k9C4T-d~bNvEaY`p*k~7Vy}y&}TY>G&21ZdT?U1ZR6}xGe9(8Z@V{tX~@)J-K=l? 
z-nfEOlG$b*&A4*6wnUUey_h<~-`Z%~-T0LzatTy6PeJLZCb;fU`7GjPA>L`t(D=g8}E{9v~uL2J0Uq ze*;qPYI^GSd$1<9fj6sgf>iXw|5{S>z)Zf%IpV0LM9-H>Q;y4c&v-Jg7mm}$+zQDE zzAqxnaiv}`Ilk=Xnux3duXorf7I7&4_r1J9YBzS(Qn?4x1})3XL^U6k4K{_fVtng$ z4)1)|wa&Kh?k!f2+gZ}L=heAJ_298IU225(m)+4djfPb);523k6@M&x&e`3lWMD9( zTEiN;?9`RasJg<{lFg8q8q8SfnEi`Jqw@HXujTPRk3D6^8SVXZs?^lUOF*wfe=cew zkbgIdVJa(CDkqIMO>@6FbTN+E)kRcp9|txVPyWEhhnb&Ku%mX^wF=Wp+QP!df}D#* z08@7=Ix6ZMFrh={!+^Juby^ixr*c* zFfV4=RJRmw)1NxE&gAJdHQc?CZc?1k`*||3W>k=l*X)P2F`t>UAM#*z3ui6ATv}(A zEp+Mef!Ff*91)9e{fmlUB;{oPfiG$ShesyifwW5$NU@Bro+D*Y30gj3Z5Py zetk=&d@=37!GOBwQV4;u?I4B)JoQHV|5znhML&V11_g2%nNYA15Fo|Dx-zfL>R{uSJqCd zA&|}R!+LE^+NkGo4&CASg6*wG3ZqlC68#cJ!^hmO4jzp3K-{*ENKgU5vqe8tyP=`Y zDUVQpv7S#YQ(wvXoJ&nmSM}F_J74f{$eEQgh|9SWPPx?4;$1|7i;Z-(`s zdkRhsJ11Do@K^EUIG%2jxl8io5sQj0W|azjTiUdY%0=8aqm?SN61t%~mv_}_Rre&< za4ruv4LbL?#-eey0U!^UHpOG-8e(XysHqkU%C)yB*L0E@8vE}z6Jg%G^s0lzsgXq^ zkRsToZliS8ZPkEAqt;+2;vav?>|>f7sv-+-T1Cehlj!2chi;WV-=3T75nS84ONUN{ zn$_HLmKdjdOXVcy2X->;t8SUea#og$b%le)Izva3?m+O44QoB%<7gXv4Xv%-x0Hk; zW>RjUX0?VX;WC{Bf2CS?fyr~`tfDexGG{=1X3awJho*SbP}=2&fx&G`4IU+nx^>CR zyDuv<1IfSv7iww4$z)EnOWQf)U9asI$$9dImW{g_-J`g(-sDlVLc7*8cIKQzNBW*> zY4~B8bmP(6R;~vitdc^!DxgM#A>H*gru_c^Ye1C0C0FqM)`Z%UMucs0N>i%t0~xtg zkK|`66$H&ss4SMde0XFV$Ez-#X{K0LQ!kPnUV4Z2=F&%LZW_S>zr*1iGCd1`PvhLa>5Y3Ag5imRqvR}VG}NdwB|~YSbtBpS8IS_v{=s9 zlwH|MmGFRCQP<-7@<%rTYWdUfL${o_9K2LUca|&J{0ypv8>CI0B1HuCTsvOfLIjR$ zpj0d$?L~EgONz6~`v?v^My9N|XdMtoaA+q>!DPEO?OM?+AB)0+4E~SSI(1>fKe6s?-o!7kGQqE+f|z@ zFCJA@^|kgiGdj>RS5+|T%H{Km z+`F4VSgg_hr(bb%rBeGG;h`;&Fcn@eB<`p;5Yp)mDq-b<*?&B;NJL&zv0VPvmtUi_ zqbg7fd9;Omu{za5O=L!3=4{a~T|P}$i%ub=ZWRIRxm3?i9Q6I}ADAqbbG74L%_O1# zuOPWC-Hs(`Pd-`q)uYRQf%|3++5xDk6di<1xMB; ze!<6^l8Pb;ihty&=U!^M&zp~kWbf%b}36~Xq5g<4# znpj2|Z1X+VxwOXXIdl!YtW30kdQ+-NxaOkOWl^DKExa;uQ5s&L(AvZ$8ISeleO^GD zq1CGrzf_)=6b_P$+7+kn1dTh*oxa2cpH3n$3^{?o+JFDXjAR7-e_kM<(abcc8J^MB zv)TL?x$ zV4Lz3lV;G9AB$A8j|DN>V|#h+qP!wQU!w3(4; zWJIO|%;V9JHLuM>(MV7&rm(g$WSx|3#N(?%fwNM-$|P5eu!vf5&KG05{!2UgKrH?* zrd#oUnff}ZuQ#P`+v;?0$oat$CfmV;*(>$l4|V%n?JNn-wAnIkfbwdPtqwUqN__*q zdVl?MYrqgODdn=TVJTTAQ^(1?%OTPaF2$Z- zp{)I1rg{8I9{9=7evS(xNWQyDeR4zASdU?9JOYm)7Q6>8rl$$@Z^jcYUyJBmFEze0 zJR!+9G8qF9q&Xs4#^YAV_}LMck&&0dNPk9%X@lZgoXyG@05VMS*f%nC+ zxU361jx?hbD4Sr3eD`=fWbue2HW1Sdis^bVy;|ez5lxS8gg1H|aV>3MtF~=_+g6Z8 z275dK3$J&?HAHibqKWx5F=Q(!+J7yPfcUF>9O!k8;TU@?Gp%n)GfFp}fCQ{xd-3{4 ze|^Kn>v4bmI*;D~yS7;xzdo!A2%snRem^XmR9NB&OT0BKaVil>MFofetx2HO2{}CK zqj$kDJl@wVPGtUOZt;}>Y#b~pHXA*jgw!37nuv={Oy3y}SW2xF)LJzlZhwGC#}^qF zj;T`u`T@BC-%PjF`zAPv;fbK8C`f%C-vYL~s~X=tAo|9)KNJ8UnK1n~3^pl=q3xCMAYJ7`~0jfzE1ytdsJR!MJmj%{7FSgHXd@F5&1e+ki zCP)f|=H1xbc5%G6`SI$9PGstU$NORG5J7@AX`QL88&TG&T-}Xa-37hKiW@z?6IP@Vu-yaVMg;7} zaVvbjs)~3ll0xjxA$F%Fdy)(TFMY2m^CNONBa;PbBSO1SZmH?|9hcvz5Sxaz=~l$1 zf!K3qEM{Sdq50>+=;=kc3@^WrA*b6r#N=(*S9 zW5C?4$ef!TvA4dXF7_gA9dWbXCvMUB%~I8rTf)^U)dqHPN@1dqX2IjP!!wsTVn5~m zewFvPBJXe0_^pNvj27D1Xa?g(dLB;L$K*(1VFH%ip?_TA8*&?Y1LH^v4Ho>yfvOJ& z#6CE4p9-Q4Pa@1os{R@H95fkts;K5cpb5bkj`+>?0M;wb^%$*Fs*Q=m$g6$eOlI$g z#*}hn9={Xj??>sqy&DKMqtk{OKJKC+iH1pAmF&B?!!HNX9Ui|M^ta=6oJJPp;9}$R z_&t8tyML2>aHlGd8TjFV#xsh?z1-kepzI%5H2F>(Atmne__iFr}r4i+%NdpQY?O-!Wq6%pM{0O<> zZd9CmX&GzS$)J8k!1<6rW8M#k-@lZ`83j&{XMZs}dVB&*kxHH=?a%_;<$pLW)#rTkIg&m1{%utVG)P<9% zB6*;qFd!zNs9_x`@ zcz*#Z+zAy5j+iFLPWQ@x3ip(l(zvU-jk^zRSmRUbV+tQ@H9pT7UZ>IcpC3GpBX-G8#j4}<9;NUmHUS=oc5SFZ`lZ7^Wl%ad1e`2(1%{OlEY{K|EN1TNlv zEgj(o&OFpS^N`0MzB!}uhdurXqB)H^^GMk6YuTf8bMp9OkG329Y8FB4i~b<`7tnR%@%eogzeeM~@%ZD|IE;-m6)_-@w<4Vv8 z702cnOc&P^fXlOL>q$iR+PHXv)tXl(y_ycHqfcOyYQhPRKLueALfDf7;wc2`L~Dwl 
zF!d@fENWzVI%p)i>9$8>?^D=g`Yb;2@wCUE0h1b-JQEHePz#W|p{t!=$Mmf}{es7z z1<4WAgJ=823rtAM8Dz<*x_<{E(}JVPG><^ab2xgv|A@ETL!Za@#<+Nn=?`IPmv~;| z&rzH4Eq)&77(8j5g^TU9{&bp++HLk8E1A}xNrT$Uq_X$y7vYkwP*()vasUtqq_@~R6 z@#W(0cDuyq8vkr@V~ANve=|w_P2-UJJ0&~7diH`xV zG4j5h9)`oegcjtX-+TNV)PEl4pAS1$L}ji10*=Gx58?VP^nmMCA^cslHwN31qb!Nu z$j#=(;3Iz;R9B=YAdd z4$R2hKYRQOG8Z&|Nr~^tf+$t7@9Bpis#^Z9K**>sVZ{HnRLj2b<@hJSa7A2j}V+Ev#{zDn)>zW@LL0RR7GRta2G)f&H-JHS07S1-%} zE>u^Cmvh8{K}H2syg-1*P)V?!OCrmtBeFPvX=Itc%I%r@Qr@Idl$yI)Xiv?TOD@lS zUw>0UvCJ~aT+-CuckZ0Y%X|EOANRZ8`Og2_zdPxIN3099ALYgEvYks;gba6vAw)6p z;tr3vp`XBuJ3Zn?zYya*J1_1EvfEXYK^4rZCDJ2q3gqmnU{Vbh)hwtcbFjf9erd2v zyJb~Y1w)#_Pe?Nh7Bxc*HwBwjV>;OS7k}7NBZ1j$kfpnD)Yb%ipd$9vkc1hs;t{tP z6zP^n+!`p0@$dl6CcyKEUjcWR>Y0ULvq#(pYS(4EoUWKM3>n~opgI1HNBlQ9{10$= zH+5Jrs1~zo0*AlW9JZ*D)RJle#O(mFmx|0L@Cu@$xH~|&pPGpjg5lPVKt+)iIe*O{ z2qAXB-{TQ?0{(jnFYXPJ6`ByEYEg{~A(CBzcF`Oq?GubD#MaIm5$6%Vfd~^Qh~OuL zZ~(H;_aei~e>~!DKwJcf`-3=0z#j16pua3#<9P9aN8AgC)o^%_nuBmmkiUJ9oJ7@N z@>isKnHLXv#Q%Vn2iU`7RKPsq5r6lC;C&Dr4U%Q>roBl^;sJ1_SAujxNCdK{2%ITJ zkS!^K5@(lAn0lF{6XB|{hjh#>9p}YkvT9GWKphQ`jtsE_7(OT}VkDp*SK~~oMS$x2 z+oe+=I04xGpg^uL!BVXi6fv0>8$IF) zFt!X_y%sNhCqUJc1?U#qhj2tY&^u$$v9!gYz4TVTb4ZO8`+FEHfPYyIK3|WQeiVZ2 z3c#4PxrZ8VdtSCnKMB1A=_lwDi}ZsrT)M%FKWIe|pc}74$!>Vxpd`L2)fxKHA>E{g zeu6Wa_nR$(+2#%AXPA5N;!_BwDPC&!1>@ZYM|E*%+d&WC?2vw@@V~$w0fZG0$E|qj zS0PC8#$mKrw2NDyPk&PA=xeURikKnGZ1w2jesxH; zJ@Ilgbh-kWf7-j<6rID`;?rBW=aBAD3%_~9bpW;+!0yLOzk|cx`U$X-)&X8>SLuOZ zT&t}V@N^yE+}GngfD_mm_aWfl9nxT4BaY!&_!unZvl;_w^HT;>_Gjxl#*wx^(6;|U(=m9J55&sE}ZjgDaUw|yt@Kyk?2Fd4k-fD=q z1;XgWrM9vKh<|CpaoJsM{@E7orDI%nlr13J!dv}=lq@xsrdW-GP^t}fn?cTE_RMJ} zLHjsZ13)JLj(BUJ!zMzGtYVA8R%?Z|B9Ax+tk)^L)fjJUB}j@2-J@+X*ix1>lLd6= zKvBBCehTA~1jaF$}Yw%8H-@>=Sm-Hv%V24ejHbdZq<`>%VESUKcvT1I)lh1)4840#f zA<-75TbH&lle^_geg>RA?_J5(P&gNiJzB10#pM0OU-)w+Yc2AvXczmzoiMOPT?zMy z7r~V+;D2RXNrBOzj?fT07tXcq_2&X}wf!X|Z1+6uc9FU!mY;ZRm{y<7OzFN~KrJ6jifN%i1U^Q#pa- zxazgTn0~}jIu!fKmkC0Kt`ZejF*5~Gk?Afkj#1if`LMq+zb>7#50{RrqdRv z9pl>rb4wXLxBrrab@y`NvW<15mOg{>!E3<0@iq`oMr?cf?avDny##4C$ z!=$k!GW|Y4Nlb^zlbJjf=sCvIxcckx2QZ%MXX*TJ$8e~AwoX5X$tmVbI^Txt_z{dB z1%H&scqhN zN$dC_)BTq5^zS^7iQNliPmR(mOX%JAIPa`(@yhwO>>h6znp6$XFTH;8W`^yPZ}7hJ zK0J=e*Rl6UEFPb{mdP(MUwt@RnO!>5{yur8s{(xUD+bl+la@@$e!kQcD_=^lbAQx^ zW@YV8-CnoKx_)`TGBu5_btn7bkPLPH7Zcle?r`Dk$dt&s>cNQ|(-7;~U@lOOKPMKg@C_Hh;ewwEofhY*%LU3&Wn8dHML{+TNSplfGFtpk{dU zw?B0vXU;|3dbTktc~4b%(+5uTbwBHi6NE);{~ane4qm$WXz{&`zpmbYpktSe!3`a{ z27Y#Q)-~IPoyI-kg=-GKRX(k$GJn;CqMct%95p&}Vg1T}1-rSX9+!R$aDPp24%s=o z-{?#B0~^bpe>%Ledu{LeRmI^0I&P>Q+4S;?pUJhFiO0*jq*|VBwZoOPC+1Y;v3;4p zhCJv}@#CVpKf)LFe&ukh*M^@Mf3w}N;MA4R&THboUOe&2aOdle`FA`0u=lg1rtoW? 
zoVja$8*0qFojdeK!p7vMkADn%wf)DJ`Nwy|hxYO$4tzhmZp!I7mik#ZU@~{%Vq&Rs z zYx^ENvTxetnf~jyRp0rFZ>*fwc7Lny_O!h<;z8_K`Rk?8Po=+n@_*vej;GFMY`@fI z%E&S~f6khkj#GBE3%gPE(%Gzs5z~HOS~hA{Xl-Kn5o@lz>+SP@9ogrTl4WJX1ljzc z-`1d*51tSE-h5+q#KWXTzbxJMQ9#4a4drK>Mzq^X_haZ|dhmKNB0-9MJDt z9n+!Tyw91A;eTdDKOd{vOx*`s#rV5GE11qLpih~+31~T!Uk6&od5qr$G=H1T@;1f`EKhUU-8#ne zEFZI(obI|=Y~EKg-oRp=$#iIZ)7f3RZ{K7(G^VMHUkFsi>{2@u8Ba0X?Cv{26Btjw zBSnm-7~>dEzd>UdPrpqgIqy!Kz{q_xGa@b4UP$s8%e`UN2=tbs#A98 zI#l-sCa2%=L4S;=vP>Y6^Ztg);Jo{~FHj1TC$e`c?*`P1-KF2Do{aCp&S)Mw0>v;s zf}PR4g(g>2(EM1ZkEcY%ggc#oQ1g+lgpgtV+HSlF=YtJd@eVK z3`i^D+H|wWcDFlX?YZzw=oaTw5?@Ts&LsoW2NiK)V_X$Vx9INCv9{Qr-ECv?yLa#D z>^}NW&3{2TSxnO}w&yr4nu$(#IfPTh`FD@DM?0v@P~j?ejt$6%bYIRhdZq#Q2BjCQ*!3dXveG5Im&?t&P%Ge6J$gv%``bvvV-?(yTes^mkt)WkOJAgi#k1g}Ndeiho7jP#kig9w;8Ap=^|cMxd8bE*gaj zkP8)~GE{*oksGC>4AdVDKvn29^g4PIO+(YsOf(n0i{3*E(Gv6lszD#3rDz#ijy^>z z&<6B5+KP6do#+rcg3h9I=rU?R-=iPUP1KBTqdVvxdVn6HN7xVhV}vE#8n?kF9F8OK zGk@5EtvCutt!r|}tl z4qw2R@fCa(*W(-bCccaB;otBBd>=o?e}9kw5=am+l28&x+K~v-iF77b(uH6WMQp@Q z;z%q>Bz;H{QAsLEBmGGxd4UWj*(8U&M23-<$w-n*@<{0DvF>03nxQegPDh*xmvVe|%X9bQ{&Vp7BW5 z9mSK3J#rFfaVC+2iPy0uCsr0Rks>RLNgR}r1QN@#G?o?1lA|RHBx!JH30rVjLWfcc zw4sMrc+j4lLg8^pp`?^T%b}00yi<6bLl@fj%Ikry^z?mq<|l5?)_)qnr>yZ`^+ ztEg}D6cIf=Q57tvbA|*IG&Fsa{G*YtW->A3etp;AIas%ZJKi= zo+%iTy>DdW!x&ld^Rl8uiWjy<__ z){&bqvNfrEt~3$L81bZ$pJqE`x$lBMvBdr)jEv+&dn1!e2^dZzpU>r|LuED@&nIKn zM`wIEmoK{F*?8vQHHI^jE))x{kxaZ;G_sBY-2xa!JTY38&Q()f;zbWbPF*?7%LiuVr-<8on~u@(NKj|ovYlGWz>RXx@|g<8=nAnNpT`?WOCR@)?{;8 zc!V(1+0-=HDYKAaBR(F>q=%~$qowRvL5dF-iurf~e`vz5nej|Im5t$OEyYF>)Ka#{ zw@=%jPZy~RN#lUiT8k4nGPs`0DiwP;=>(-YM{=2DraGOYrl#9)l@Z`uJQY}-C(rvw z#8@#GqiInSM>f+B^yEZ~g_5gUTirGU^pngqSd?VE7_YVoZsdBx$QG&XRwuGJo0tKI zj1=UUf65s&pDJ)}Z2w2zbauDnJleJP+3gpH)Y)ZibhppIWC0LMWa6cQ@lP1L+N+W3 zsp9BA+1J|VxBmlFJAKT|zuN8km%CbfedX?XYW27=K5XP?w_ivuM4L%84QlX7YZp-} z`Sft9XjCI#kW>ODwf*n272Fr3TsDe@f5F&=f4QsOL8-OvtZq8}%&QsYLSku9U%_#~ zN?ki`H3)^0xT27Y<@_e6cm2K)T`=lm-(ZucL@(#l&Km{HV0g?+xu0uVAka;Xz0kzL$T zS(U>=l~<2wite5(P@*=adxTn#NopOkWaP7&@kB{+x}Y!x^$t}!p*w^+6=8d5*oucnZJm@!X3|4)cy*8B)ILyoQg6No zILSVm^04KDdr z-B0yWCmI}@v_kg_DH-vbWR6RHf6pXyY3RBf_D2Sku%HN(A`jSLD0*ZFgn2IYBi((e zLcMLm=lSqflT>=7DpE(9(=E;A$sA}lNh3Vi&Xc*id&znJ1s4B8%D<5E&+C@v^JE_2 zFo_Be@fT7Xcrssiw=k*NCX+0*=A@;6QpRaMJBuu zwUg~qB%978o8dnV@KTd3L4XFaJE@~3SAL9{{pf8l9PqLhQ^G)X&H zdyvIijjF;0TH2(8hNMY8>Vo^-*FTDaN?CQm@};|%qdb`FPR|R-X9u$dqziMiOW)mDC5M9*Fc*iu5RxpyHf*=sC&0Ua7~o#uti$h_mXk z&MJYkDu6ZNUI}smf8>H1EXc?FQcn~|bzJ83W5A#bwY$GEL=;*lt;-&hbkljMNqh)- z73y=DCN1@?@JUP2>ZIPh6yQm(tZRLmFCs*gh*$T~(hZozf7q#!nciCO-|sFrNe|Sm z)_Af)(o_vajzswT%LcB?jJg!)U89=iL>XEYwZjRa}i?rM(y=pV8u=TEjUWc8~ zE9r$Gt+a0hvJwP}&ML6d`7)TK^J=(6XB`31d5uX{A>1Is4f>^!*Y0ki8cnmW1ypBB zqs=k$9%8->e`VSbkk-NLbr*uRFD{d;#!+2OOUi@ZC3y76M{rb!qjTs)o1$WMB;EEs zBH-@eD03}%iHw~!HuIVa<~25R9|Db#TALQMX<=b9rRz;fYJld8ihK;Ase-MJVuS=T z$y%FpEkk83C^YIYbkMmE`so~jQ*>U35b3<$Bw?Tue@4Y^psflZ4ADE>0-?_aVIx4; z`1c@0Chh#YO}m4^dEr zaD>q2;}9rsJ$Umy@_eYB?=!l6ZJhXZZgRL zWY%H1e>os+QkG+AHJTb!kbjEo2`Z|CzCjeYvIT)e?Nr}lrTP{GyrnYLw+pU8|&#-kZ23!wzeg;M(NLe^!%hhipG&w^3OICAfnLHYjxcS-~B) z;FTuX2|;?1ZJ&yfw$6blY@C5R)cz-U~-#e(!`UPO@*IBv=*%ml`Q5-9O;Cet#Ef(SG;u7 zD^#n}W}YOF!Pwe{tz5d1&y94JoJHp%1A4;EJ7%Ys7VrsokL_{$SahWQJ*4fe?xt$ zm);$SA3Dd-f1vX!)MzFkWuY@m-GW*?R0KH$)lu41qx7cH6(3MANphoRzoM+dNZRdc zoK?m+D6}H77oiZcQOKFZfVrz-ZUR>9GO=3F$Y)_i524p-1PNr4l-*-fL_UiX-;0h1 zExQ0jJ|OLd(|ap{*b5-`Rsu14e*uWme+ERsBx&f}4V}e+RD#YD)yY97jTE|+C#8!5 zO4|YL!(|V#7{vDXO4kT$1TQ=|h*!ixywkn#z$-_CUNsT)YIryt47rE!H(HB7iEn`s zPl$u`3Beu1GvW?V@2IW8cZg#&Cv>03dS~u|sU>aF0fDElL62bF8rBnoe|iM0tn4^o 
zZ{l&bALt&$aUs3;^W+-8bS(|=+Kb%pvfV#~=NQ~ipkNO7N}q%KiZ>t~0f0v=0DS;3 z)G8hExs$ZThN#Z#cyfdm4_O!EvbHrdu_-$Xm|YEINBjK)M0!H`txARk}g36Zi&f(*!<#k-3a*?nZo7g1IyR zyQx>YMW|8bH3G7je$mU(w`iMmv(U}H!qM+m^ciMZ#hFde?^>0Sf8I=A)1blmTl~_E z6v$ig%?}3lfOMi)nqolS1|V;{03--e5Ta@@iYIB5K1pv&xEpKKqqMgb=t0q<_0g}9 z8`blPaRN$jv%x*VlPSM+l3F}@QJnR5oVS}~FWeY|#2vD(hLtLBSnyI1X%*!+sUPv= zPJFioPX<`r)hpf2f0E}ElIIjn9vOb0^5KI|4f^1>k0!vW&}5A~L?4<_2m5MV3CJls zvF_r@-G1p#>fIOdRT@UdVZ`i}?zsr?S^(Z^cOt8WTHm>GGt%!Po`@r?2==$Fobbe`+!6D(D_Yug93l=u~zV zMit|q&DISVa~S_TjQPw~dF^04D&tv%;bl7Mx~XElnCZNP{yz>|Q-2mRogEmRjGy|@ zSh4n3@CO)uImU_#pD$x;8jH@>)b5&!b%@zoTj2wh?WozCUlASEoa^yTPhv_j`WKiEp3!$RdXdd# z`Y!f4&wMHCxte{RVD`)Os~A1Q**Rau=sTI7JuK!jfB$wyr+&FA=-U`Q$>J>gvz5_@ z%Q#Saf4WF(4LCcR4&1vtx;yt1&GX4a^`qyOzI9Gtw{Gz5t>25dKm6JQ9XC(CB+uv% zJrexsPbLyi{NSFqw!Qy&{o27le?PGAo=5k*{?K=Rao?}6{mr%gUHN<~H+S*N>;Gr@t!K1XfAXv2nUZv4 zbxT8JZ_^EDUYz%}=~LIl9;$A8?zyesXjngRNwa^;%G<*G*6xg*p7Z(a!Yxni?%BWS z2QS<@_rs>a(HEL-Zr*ukY2?v+);)9Y#=q8XfBWHe8`53XxmzB4;4=P`y-mk@fAQer zd_#I!f8yy+Glj87zVfR(oqz5g_^<5kU;X*rPyFRikG%8rACCl|{r#qA|8(EU4^|9b zI{OdLtR3w;Hg(#3_4~K(n(?#4nLpllqS$um8#hbNRX@Jt`qRo|+uk4i;Ty{bWB+$! zf9_1+mxbT%`XH^!tMoIkDdxGqT=KIwpE+eb_@gt`*DMf{*Z=peD~0$w(^mg6yzfod z1JU{S&ieLW?|ZFnY3d7G-+5^I$vxNp^yT^a=%>NI{pSg1OK9%TUV69lftjNZ6{XXq zwcmc@oBQURd?mPV@93G958t}+jgt?*fBkdPxbVP^7vC@S-Rjx+me4nA!^2NDJU{cC zf4h16sW+>B`^vk0>3jRWKKt469mm{*)#qL)oL&9vw`+Hno=&aaeB{ZIFFRkHce>}( zKd|FS>)?^B<;9vJPK-k(b#-}ouc?up~9mQ0rSbbZ%nYH$x* zpTI~no|`^Xj|N*`hml}BhcRM|f4=`Sby``QmGxLzgOznxS!mBs>*B&OTVRSRu4)&R@mteHA zbrZv#%9igx+G`wZibaaWrN^vO7_CZj!oEV*`jC~mAFuy?$C6o#qXf}uG|FuC8$6NB*{tEFQc(SX?#4MKS&v>bERT3ogd>I#&9}IDSUd2 zbEUJ1Oetw3xeLG8T6@VPC)3|+3ls5#k&G1&P8fwz!QOBOx2^AA8{4}6^6(fpqg(6h z(fnOn40E8{S7stthN7|cf1CO?k8v|o>7v@*(bLh@-nFc!JvH3Zv&`rjInTLy>j2}d z>RPtPie{gYFCd&T&e_wUb@(aGQB02;qt!TT*Sm2?+#ToRE8?@P&fvN&KPsviKwMe>L%S@&5$?009601;Nb<0Z{;k;aTA&BBUv0{cX6UgXt{PWa)i&YhXSO8Nb6_CC(}pL72A+;i`;tsQGC2x8XLO}@s}jcRg;Agq-HK}3-(^#|oZ zNPo42)Xb0?2*>0sb1IpL#G-UmQUmd1C>Xbga3_i-tg%ESX&sQ_!0G)Icl}7?vePN~pF-EH1@k{h2WZt7SxjaYeGKa!Rs}1my&67y?iVnk6}yDar9* zB5HBF(MT*F4h7|KhB_c8N36i$bSe+{i&!OK(|YBe^QYsDY1Qu0hUr@amAtx zWJt1{l<81hGAtnMN+LKUS+^x)3A>a~QnG}dnyRS5L>Mdubd8K)e^OR$iR4H?QDrOE zvX96yRSMv)BQzj|1_Mee6p|DL<4%nPV{r@x{iq~QH3aY^o}rB)^fCc0mWWdDL4P%r zH&6|A4M-UPD_BaFf?*vW4Fw0KKximz!?cG+!nR<)g1xpRm1&V!0-PL`LMb)aAD675 zWLQFSGO2GG!V1Gdb%0JK!V-u|;mlYv0OV)HV~VQSBJrTAN(p;^3YZOG%xu`ulp+Pd z4O!J^6k7sNg5e=r-|>N@qOurQ8h=&gAfU^^A!S*FllQ#GIrOCd#<4kck*%=BT5 z0Wc39hqFS-cw7SEXgt|(H#RIR4I3D^qes#oRP>$7P=@0%)gDR44RRExEDZr1Ko1Qi z6FA8tvC(YPzF|)n#F-&W(NsJrPct04wuHw@3ieaURFi{{2ARf4U%+k!<9`|Yj6t{F z)p^wk4uNY{{AbEgGMtJ_0eld28)@VG(rZS_D@)^{qCQV~MX9HBS|pVSsWEUp6b}OP zxuxlRlvkC)I*8eT1RQ_nkTldU$ummR8U7kzZs}ycfHI!0fa!|=oHnF04ZH4V9^PRUIj86+ zTw*0BQbihX^AI9s75h2S&$`4SO7q~EB9};UqO(f0QW>J;Aw8OC$(DIibXQY1xvROk zqMrv152bas^DNn^ZM9^d7Rjwyog!GMa*D1Zk@JdFi#BXydCv<}J%9B=o;Z=ZbdVfo zdBWFXafyDBJJd;tc9zn{Y#xtj=csIgBgaI4CnvU0+U+)8ggVT@NNtX++$`;occ- zcqK)SYiBIk_UR&dA%CmWuVGHK&!Dsm z*eNF`dMSrjyJ+Q^d`u}~ZR}PL;{+BMXP$WJ@&v<@7qOgw3|1WwXanOQ5i9y{hX`OQ z8>ZMX)<^Rc<)fDxe>fO&6p0+{F;=9!t-QS5PcuYjjz}UWgb4JZ9I%8xbG7m1@R6sX9N?4ZNtB<(@*-yy z{bx80<=Xf${C}6dy_T5{J{4T^$;}jS_CiP*h^k7o7Xn4&u0t$rIp#6XCLYsJJ|a#L zB$^JglpWxv+lVuw{{r-8=t( z2HJ|#X)EUJ5I4%kcEF~FTsHo~l#GiM38W*=NUI`!4sc1z%@kQL;q7gq;6!O?9(c9@ zB%Lmub$?7gX1rKue7?bWk(-%Er7JBkm2$qWB8eNKwQDU2&Hpz|Qz z20`{Rdkk1=+@u+=0pOgTWe3@WY$*ftrU5yGmg)qf1{ zBdq8Ah(+sm}oJ} zJPzE?=?7<{9{UO$`wASpZ-QA)A|Ec;epKu-^JkOcP^;;X-^F4er_YHW0)J>ecZg;6 z#{k-lUjw@C0vK%p4}g+Z(9qvA`nv%M9MmvFybp_;#P~5WlwP<>U@3rjQ~*9N$LqLo 
ztpH~+*DEyyPcQp&C?>12JaBi2F1T+Xt%DH$%vz=mieg)OQEcNb={He7tk1$(jSF@N z2D8D_C@)@umtPfr1X!B&rGGH$M1_Ff29UT?uGWwQ2HU}z_8O)GIB-?LC)da|uX8gU z%`R~rCst9JJkw614)C;7LlIEufY8@W1!nxn4XF^-3nOIm0GNi#*o8Uo!kn+2VAhdn zt-;$u@E~UJwhosnBZc8)4&dOsHM9+&*T8Cz0bL5vgOkv419}67-hY6hdncInB zsy{hoIMPFSBmJm?qRdjOz*go#CT`J?iYQUbSQSLD!h^C$OsBsv* z0JqmTi~|}0${V2<6SFCeXlGFoiMAQD06B;?^aUQxBT*a}g@2XJ5bsb8lb|5xMoNEP z8B_<&m#Gu#vJ(zr+6KV=q*IpgGD!+Plat5Gz6IQoVbh99G_0Y0fZqqT^oA@^6kYl^ z8LkNKCk>4O1M`iK+2)0MBcQimyH;)jlm#9}#oVfxTlo^BkVwYddW)!R5$r0W*@3Ek z6DBYPg$is2A%EKKHOvUbfMZZrtUgXZ`_&XKWN>2`d#Q)2*SGYbM!ZZ42Q>?P42V-w&z#BL}YdyUZ{o=tm#On;i&=Vflh=Eg+3L}5S{!OQN( zukB8~u{$?u=m2;Wf!*29ZWZBGNBOGYiskgIS`2~*Ax#QMbU;HlgJ2YBIC!m!)_}mx z@LiQ6)~RwA{(`(>Xc#^%2)b@)R%jW}C}=b^J2VTlENBcg5}E^AwkhL;mXjuvnobW*_@hokNgjZLqD&wr#Tu zUAAr8)nyxB*|u%lW|wW-wr`*ROwNtigN%4LhAU(3%x5X&EWf1r(CMci2d)hD^QmM$ zMCQ@6ef*xal*QhxW%($!;A50LC<0zrQf@`oQqGupkV!|MRm)&ry{>5T&0HJhsbcdO z{gQJc*w8iKH_+=?;Fw`CwFfM?d`S|(02Vk9)0kIJsx7`lYZEDb3ONX!YwD(=OSbdY zMt_WEtu1^tDYUI<=1fO|zoIfaAZ2+6*p4#2_DGG+uh>_poqA@ndJRz!XIh6<`ig9c z!zM`!U;g#;X+?$Y7B3DZ?}@}`#NLp>!Z%nkN5j)e1ZPA@o;wL1v~l+Up2#5 zO{Zni@%N4MPov%7aRX`UjIaIvR`{~NCZfeiN(B`3k)+igg z-TMUY8~#b@XD&0K@#X{3XYR-pz^5zr#j4859PCN=!sN2Oxj^Z*Bjvr+;l0BWex4Ny zaDV+gA$KglnqEw-rPP+I;-OC+^18=ercMs^($4iP$(HWE-*24ZaOKS9>0xd;XupZp zffk?EL>=hT{%O9E-|6`=o@lpF7qfeH5bv_JzA*Jw-d@w6{yD%DukC6B{D{|PmtDkf zlDoby+MKk0Uft)e>)!Brj#6&$3qam*wY@eLZYTF@{-hq{>U#fZ#}38#zGRyBG<7Ne zsC==Af4`fqu)9v$=Sj98@lF zL4~v=L0say-%lTSpge%RzV88?4z}icy>0pQYCmrXZG?Iu1TSv`Joj~)%0C`^dE-xh znoltMTXP7%_L~X$H=fN71vyV`zP&YnE?r(?7<~&hy_Y@z;bs$Kt9Uv%E=B?hS$Z$= z?RGPoqThHaXKeM&@&n$W`S~@**Kez2x*lf1>aXxNN zB1^x;(vH5?IB<3VZ?^VtozdKgpI`np0hG2k1iW+JrqXp@Kd6kUJE(ZAT5NcpXT!J; zHy+I;H_50!)7_@hYNxHI*1O{s+{|K0ZkDK!DHxRNb{m3jIUj56F(%oa;-7z-96lEd zWw!?B{kzjQV7EQV`S?{T@$_C_g>5euxjnDBxt*439_+LMV*F=5J{yD$T%(P8w}gm2 zxoB>9zST2|w`j^j>A$8Rs(osGet@4wVDJBXVf$EBtK56vS-Sl{2( zH+FNO(}dp}b zz&p<=kE+(?tQ(dOjK=M1rW3C`moy)q72EOMnIkiR?7&m&QkI?nG0O)@({U3{dsLe% zrAm6oYft&FqdVTF*{o5w)x)iDE1PiYz>9K&=ez8_%rEah3$kzJvn6R?lVYmLOwZpO zEqn&hsEe-T`G_cVcabx0y!WK#hsEZ$UUjFNlUm|wt2;G!kXub{&EM2Po*66PYE7-K za479_Y2)fCs;R0;YIHRW(a-AYj42hQ>g3#j4lY?q>EV4FPu2}`O@Dic@ThZU`E<+~ z&85{i);MTRQ!n;E@auslb$n^1p(SL>juJRz7%6VFc^U-q#+HAGD3V`NX&uXSek4z{ z7?G4F$T)l8(=0F5RlNC|1vmT5(H*XKtUTB>1I%!uVwQ1%DX7P~RorsoyQ$HQ&KKvwV|%qkRLvfBX370CkLn=A3IVEVBqq=cLDv1mhfwVUaQOe>jKcaOj3lg9F|Gyy!+x|7I+nCxI#qeJi62*QuRe8|~V6G`4WJL$l5SmcsWLw0KWD65A?2RVRoS~^*C-Q$M zvV7B0c_zE2vRHa=)Fd&;;7;;7eLh~ccD}|#b2-{+(bjjcvN@bT6$-Twf**gNg8@PS zHeK@(*Txv_aZ{%y$7&77P5WQiS#Hv6YX#bNyRCI&GOgbvjvRI&LRYF}i0D?U~U=D4Ls#RRh($wKNR`A3$T>a=QRmFro`6;JbY(v3An&5&FZuEw&ZB}5FG zxG0i_{AYjJDl~ET%$T)S6)xnAXWO}db;Gy1>)rohJs9>>yI#e-H2N1<-~l)s<7-%R zAeW8hCj?WiqdoJakmqCKbV%SlaY~RGE+S-YN&!0`N;K~FfFyQp7}oq_$+gw!T7FIC zQ9w(~nNZWJc~^eVJp6E|)}}F*oZ%rpq52?~CV?y|HllvnP}r?NuQt#$5RRZ(qx>hu zy0vwC|7Mz-WlE?vcT%Z2Zy)d@(6gQ=0JgV~E!BO-rS$8pdM&!Wj#rryd~*8aNKC$Y z4alEW#!QwPVRn-tIe_kY*}7HH8m$sNhY>@klkH>$zLMSika@Fg`CPRX*)%0v^%B5m3U^8wJMLCw}sU1)C* ztmVL9iD}bXfNT@1KTLbRRFdgrG6sg6LJmWr{;u0H!@0I*CQ852d`*#V!)E1?dEQ%% zgL#$HO~A2if9x?P7Mxu9wr@Q*cA=2|gs5R<2edb-mStpU3lXyTWLE4l&tiPC=g@l4 zq3UKz)NB2(zcYQ1s|Fx69=nW3GrB1dFf?7wDrBQ?-dk>+k;}^U_<_aaJBD=X+U;Jq zoGB8v5I*~-8x6Z+o<0-3nl2Y$wPpIpZyO=Fsl(tAY3<*&KSkOit0i`YvFJB~{uxs; z@y8+W^e+>OM7LMHRB9>2_$b;~6KjM3D$jYK^r`kjz%m9iy$7TapKU!!wWr|T$J)?z zU(#L2C*HJ}$1xqNRl?7&!CIMDJQ*1i!>PqBxuAf0oop(pzX@pr8>CJ_)-9>etR+Vr zBphjtIUH}O2&lIpDV&wPdkQq|9y$i%7li%N;a3M+KH^Mv?LI2G57QIq1{L&+#z80O z)Sx?ZYN4$RHUMB`8cJb}@Rfu9S+>6b{npvXic@f@Xyl?lhfNOQt^JK*DaVM6VV(hB 
zO86@nZ;AXuz)SmBkkbCK*7hcf#ay4N0O>=^4cLBOnQo=*GIdoUWvW3LxByhsk)aCj zuc9KWPNx#36@<^h`o%Yw1}(V$L|;NWnQwdaB4gL7x+I{b+QigqW2MKrPjO#Xfy4r7 zk+FI-E6&GeO)pJQ=2PmzUNB%h;^We7Aq>S^WFczn^p)T%`R_ur)-D{ijCj`W zjo05@+=hcR8YE6rk3F!XL4P;F^7B1+Q1<};0ByWxej~=n^g}O3P`HUCPPi(msMrPsA=%CNzlz+RML-GEMCDMVQrIwmC+jS+*YtyWQ#Ee*Bc78s(QRxPa;Y7NPIl%9y!Az(G(SyuiI{&sfH zMkqXt9bFDFW*=^&rffyE`-_@p9szO%(Me@Z=8MnuT^Ss}6xYDPXmR*rCQ|VpD(Y zlNyE%{!$bL(F=?G(TT~MYa1{Ia3jr5g}sgg>-51zmIU<#t1#lCzN;;c%Go5?;p|l+ zm8deyWaw8S3iTEbzlI28rYLH!tNBM3@$c5_#>Gfas(s9-3VQ|=)%rbN#r15Y_CU<9 zI7`V3d7hUR*8*co>R;5e7Tx$gy7W9I$Mqx@)do1j&J*4j*M>emWzV|-1m6ew>3w+H zB)I15Am`mi(OUZVnUWY{tC2h->=qYzYU-j#icFRW$IDR7Qeo6K#c>osy!zj6iw4-9 zLzpx8MYTpU&wOzJP+^V#16Jk$#9SEiD<~re>qd!^Ra3B1HYsTqvHu{DNCRWj zm>v~BLU4MS2;{&QjH3Q2Z)_T~n_pmZ@pQ=`7A4GOY+PNL>e`kFR04$nRfHa` zC40q)q@X{;Fn$~qJeX5%q^RR>-0u>*Vd4g%L-KVGYZaj_DnVUL018`3kq)x58Ca{Q z%yt%aT@Ac~=O$u5hfp0)KYd=pD8&GAOMsu665hz16hI?+ms^!rG-Ko--XWi`-veTt zkf@2`YqahTOQa@5Xfn?;H!T6HOy~=l$XTm^jRf>3cHf)58y;v!3kXfRDVGQsJNOr!Xx>yG3b^(D{_KX;h}SifWR0rq>VCZyddd{94PliNVkU^aNImD z$(Y-rCAYTh{&o$5`3hL$C3lG?uT& zVh9@`xL;k!^M)ZIq==^hM&V5)@PaT5`-m|HekY#2xm#__15gyzb$^|jtXi13tpZ?H zJ&|(EAU1*)kYlrf_bi~c*@=j-Vi_Ze728Dl18*`<3cP!}Nc^Tx;&MtO=K??d2A+HU zz_N=m1f-IQ0o#5^{E&O*)I$Vb+jdAfp=tEc3<{<&+(aSN_XHC{%Ni4x7IYM+_{$3v zTl$ZjE6}Z%&YJg)m-2N1;e};`O{_aX7#(Nddw{Tm^NC zu~U7WbX~ZGxM7%tW3#2Q$o@5;NXdca+Q^pBZ=6B*fc_Xh@*Q6jwt3P-@yQlqGe3Mt zrH42!Vj1j@Hmx@3r%+zDNK{DZsYh5?`YcYmeTs1c|0fOLdsNa-hs6iz3UlEWcWVUT z^_<4ywFPsP&@RNZ&EumSDpEkKhRucpTWg5lMkzCF&kPi&9`~o>`pw0aL=)%XsCkIH*u{iRhF${M{GGU`$A3ZZ#TM z|H+sqj4$h7$Zh1*d*Q%-IuDxzKKs9iOYvj602Ro`SDv$a2-d7na>@@%|Kf)Fp=iUu z5PFC_=brr%WqIsWnlADpKzQMU_u z{MB0Y?~1=6^yM<&L3|Hz?Q;e^#&KkWN5qsz%I8$*LKAh8`cP_b%m3_gd$`19NSp9| zzUd7tn*vc;`=|+(fQQO?U8*`B!dVx?0AABa^#X+%15P=ixlhR!f(cIplF*Cbph}o7>h%X=rEiTpq9?#H0P%|M zum;{h2G(AeHb~tOMpOk#IJ=1=Zaor*1f}f@iw;5Sr1@`)-s1rTk_k*fJZ&Zms6

Pe~mzz?aSH6SNBp zv|b4V^B{ppuZE(=THk2mrOZxBcDctZo8@t~j5@_&i|He8nh3Kem-466$dJxXVI9n{ zY(e!X&)+PG+X}(}u%%Oyp6#{L#b{zYs<@fm**#ZnMQ}uL44X|Cm`4w=h-HQWN#iZ! zT+&_jJ-I^k+p^hO$uuD-K>0d=98^NoiKehOwt!QV-Yjc!o*%1_A6(|%1L}|G0g1!ZAPQJ7v#bX$5(YzS6f;)}HhF+TGI7 z&hLz!Wsgg%pEs*sNXM;}DdvoGevw+QOUC|3?6jK8BfGLwA%t>YT$0Y#o$+4VwCdRD z9SFkqdpkCDKYY6)z}v;6Ah2TZ?dlEDxzlCOn6NLcxV~F&=(Ozjtww8quV`q%_z$QA z0KU3c=Akw3q*e|K4*p-r(M=o(u4OYXKz3EuXNhob7tjB-mG_ylZU^jx`|2d*TO>eb zTWpE|e@~s7^51o%^~oFDeQ|PFQNAO@Mt*J=^J562cGBX*S2bAtJdNG4_@dC{4L% z>_koQJAML4U30Pa*n#-ix%y8aRM+8E)(PKSzT`iRgLPBoFZnGXezaOfpdfvSeCb+$ z-nOrBynMb^HGP^KoeicalknJ=iX>^0Um~-SOX(A%R_QMO1(Gk&N-FLs#Z=OR^P5?bVIY!&%HNKr4 z6&)p-hDlEW5eRLk3W(_Vi>F1+4PuB;*ePf3kC~Oza1afOg$2R}3sd@9)J>R=Vv$Yr z06vz?$wh4E{s6Jt*)xx(pmawph+gP8HRx(VI0JMl`*#E0=FjYU9*~qB2np$a$8rN8 zmOMU~?JBeK)C#K73I=61m|KmK0{m<+0($j1y?dSfg0I7NluwEo)m=D`@B9MeC7z}Y z1G`FH&g_Vo!2#YQB+$z@j?46eIUMvOK)@KnFU^aJg)xveH;}4FGmF4ku{gHHl;)m! zu4+-OnL-yXP|T`wvlr9uD&#EZd9k#1;A7{Ir*D-xNumwtifCI1q6d{ZY9iP~@a{|M zzn<})_gb+BWUhu1feb|_~{XZY)wq7hmFD3I70AjGE z#C}H5*JjD*MR|668X7-AMD0g|puqSk8ZOxDEAW>Vj*Fsho_A@{6M;rVR?V0#%yqZk z>`Pzn%WV>yU?}T@Ikk`LCr7Az2=N(G&~sSS)-hJj4MT?T0fc1G=`WFcROqb0$(0Y;-Z)afJ36F%cs<5=v{Fye-20BhTLK6?5-Q~k2$Dk@E6S@ z-Xgr`%2SeGc+9t_0)9#Gs>wE$|Kv08;kWeK2%8u+u4{uNSJ@UF{{EbxdN$q1x+bG) zb-nQ9?ombHn!>;>M^!;|oe|rs_YiVxv;p_jbxopR2C%TWY7;>f0_fx&Ak`6EJP3x>x!SHG%+rMAIX=vdHe zR74Fh(_KlQths_B6E)2Sz_)f|3>MlWKl+m{F85$g^EoLik(w$9JLs+}f&%n_$)Jnq zBAU!VtgK{A0S?r|Sy436fE81Wk*P4zpouz`q4E?WD#ve*7FY?HtV1G>cr5NuU~P5# zsi}Tp#P_wCSfIO#38Qa!6)!j*d*1^Tfv)PD?CxLC7%N~UCVMn>00!)!A_(4;hsr1P zs4VxW3;GsF%mQ<)ZCi%vsYQbU*>6(!@ewhaj%p@pVkG(Ycx0o>0&b{JriW8I^yGmq zZ;g)|H3(xA4&#@tmDEaK?Pl!)trA(pyb8>O5$efx!$l8Ed0rOA2S zgs{%IF!!?YVN`O3k*?0(&AlpwjD3oIIuv6nSq+uNIf`51hb7 zC4@B)a)+Jax+uCM3+}KYAAX`~X2=I%6(N>v&6T-4#5*|T3hsQQ=xsf1L=_c?i>zS$ z)Hms)fXX!>@xBGMJft#a103rV0@M}44Y_{dvO;klPlcObQf;q$$&v9(Y_E!f&|Pt2u-o*i8-c7FB8~|qwAgP&MNBL%L zR6SLf`pB(en0=$*>PdaSOh%8#eSOT_z(_`H%uc_^8BI)m&7zf|VVHl|NTMXJ!@r-5G056CggCDycKYhHo0g|K3#X{ zb*I35mo9UkHz3m4&z*@rz}d|w$>-11Ik$H4_~mDsD?+vyS| zgBe;i%BZn5pju|HXZEzKNyc7giC&KcI=oT~sEAyECf-ZfKe9$#nUP)aLJ-8xQ5kku zA;Yo^pxt+hfpzxTy-BMsCi`@b!ND4{8k`qm@3ww_=H>%SS8bf?+UmYUB<)o;;rGN= zcmCDQ^YewnK)P1<1z$m?2|?zWGPJONBq(87aY|md>7TUh$R<7Lu55$e4I)eZ=iHAX zn|(=D5&X|}KncO0d9av^^c0kletAiWkWvanz%CLJd>AT}hW#`VgGxnUTYw5^yuhY_ zdmIiCm)MQm)6jO}z7|n)IsDwpzhIqwt0nOA(c8t+HsCOSBcp}5X>wnjGo1ijrq$g2 zWg?r?%@(37s&AM>s}S*$XMZAmDhjs##gJR)!%O}F)PYlOCa+7G6BInD6%wyYDF^mA zz%|@;|Ii+81U}QstcK6TY7mpx+3-*!;(<1|sU%J3iCbon0cGNVV zFM)dFDxBzt*sShl=p2zRvODL)>_Y<@z>>T%N7xh`#WD6`&g!Skl_dO((SBoE_{oio z9y2wCOHbB%6_}>z(?1*MRtw^1Y&RNiTj)-oNE2>*e8`e}I7Z8Hw;yhMc=g8YmZ0W7 zYR2H%-LIar@A3sIvGzcwM&2>moZo+Jg>&SXq_9`&&Ls|aadFi25v0|UhS-b3DLSlb^!{PPQVbywap~yFV^j$%HOI7^N z;t`i}7RyFz>sF|_9|UoNFD7t=d!($1W)dquCnir2*2M})Ax zgh#G_-0uoSAK<40zOcWiGYTIYfS6YWAHJhr$&3DeK*o9^Py9`Yz>~+~H%@ft;P*dm z^l4*y*Qw|gvFgj`7ke@RaESqu&gC5*wMCxRSP6?Tc+=YO3!WkWz;irus>tu}o$W$L zO9$d0c@qu6i!o|qr62+HmUS67e9JFISDppsOY;anTs$5cx-AC2l=nCR zJsv05X^Z>&4@;QNt1g9PXSqcYp|Z=v`)9}xi?o*3$r=3r{Vv|3ka~+lNRWttF>CT3pXXW;)$kOeKz{ z%xBHr%7Pdnwz=WlP(5ch;!!nrVHJO{wPvG_>U^>u^2aFk9HxW$2ZP zsqIy94xfx>t zsqd(2ST>jOpXQjloOrRYCpXc%zaFqNLL84A`(in{sXFx41*-QFWP7%ESp`Wi`w?QA z$m{Fqc$;3XMj-y)iN(@?G@F{y!p+G`?yiH9X_ID1pz}@c6g+B(1&V{OPomHOGk9ovC&c1S8R-+?*4;Eh=$a2Rv_-#i%f|dqI1sO zrP_h-+y}ww>xO1WP3f$`h64! 
zIkA$7|G*rN5C3zmX++Vo2`6Yx&xDL!6n3Qm{Spa?yjle&ZX4@|X_vmXS$b3CeQ%gM=>hp@uHu zh8a79+H6X8=B&AmPT8*on3Ti1yi0~5`qADx$wnC$ST6lKnjfSI-=2ybCve@xol(&- z2*=F7*%w?Z(0%wGZxgsx$Nowxdhp=AVGu^wSVapf44ujWiq;ZTG!)JynGqRV?xMJI z1+a>+q@+AIv$93$mtDFZ+|0h?(Jcy~iMHMDq7tf|*+$BtK=6!08nUDfZps9EpMCyL z(txJ6X3-Gv)(~dY0*b$#JgpOa^7V+`O%`i`i4IJSoVWc(?ARNIoR(yH?ygB&4`FW6&Kn8q)(N~f_VrF zn(pI@n2_P{NY`qk{ue(gFubY`4A1lD~cjfna?kXtrY9bYb>jVY+11B)Uw_0R*F{MheaEP>vxQo9${ZZ_z%_@k z>TeEB07ZUeY;9H;iE!Unh*};CThOG5^YO@um-aQx_eEqMr4U?c_+JDR-^_vNUg0@N zRQKjoWin5r7wfMaa6jVS(s|{+azFe!sTl}M(3+o4NVdHXA;Mn4suhfdde;&mc{0Y1 z%ueIK84#xu7IwfwmGW3`E|_kRR) zttaHKQ$X_^P~=xoR%E%`C;g}dDw?k&v-f6CauKvL6s^jrO?NnR z9Vhr!I+h6F#N0twd1EcXaNZfvlT_D?ea{`>AF(e^o7U_v>H!@iAmG8bMN-V7UGP=D0bcjAT2;8}45$fO>xp!$o^ zF|&c1*&RN%mVf3=XOj*sH!g!PW%yO_)?nd;9$Eya?;7KDFEC|0-2koD1@lKSs(UXg z_X-jw5w@|ibcm=Jst_z*b%>)W5|Ju~A9gCbUa5@iT!`nJF|;lG|09Q9#`X z4hN?d>d|knGKD{ z!IU$p7Y)e=PvDvHF?0B7!Ik(k!=cmVk2ScvEAMkRQdA|hr`2F$Mf>KeI@U3@^LtRL zP|JzLv_a{!KuqhyslWiqchz+V0c~8ro3m`ir6%uu6~AvM3yJBW8uA&_OL$R+!0Q&Y z$Fx+1B&gc{HD|ZO$uS&xcs;eRIp$r-6y*tk0Jy!uJo4y8CVQJ69=@ zwYa%$NaZbU*M_WZZXEl!-5DR)Y7gtt^coJUBc-0qA3r#FG(SA`x#>L1&$i$`J#`u` z+#CU!m+r~$kJG*()q#wrO_!r7V8esE=D_&tr%iv4DY?s% zyG1Br8Q^n}Uw}{?@Ot>nBql9IXTA02s;_hAeiN5OoOt;M2(V2u@4i)u7KFClcD9dm z-k(5OuV@y3)K||Bblq-WGQ6~ZJ~d3ge-vX~UbyROw9soC+j~Brlv4R@ftu<;D+IiX zyFhbj^)dda|TtlC>hx1yG>!S-vilZ zS7+tg({NU&vA(vPH?%(9KkCG;?%jAmeO_#)<%_T;quuf6CiTp;+x-eYBq3w#Gwb?S zX}~nns%UZ__hM#TW81pzje`ZFk-l4ks#DXEnmHF|p7L7ujR*Uh%}f2Hx z`-4ReR=){)BPsl$3kj+FaRu`mYatqjU`P z-05`kxayPxYFM_WhR4|t;)LEmiP;1F6Y?|aGuo3F z=cL~JAh%CS@D|LFYzkp&%Mem3E;nm;`Re%%%mY)%Fb4U8#7YH3zkqU50dg-IF&_HhA0GcnPBi{hiFKi040V*$?>z{G}*?(l@ zg7h|9_5Lg#eKkj-G{pj+csH5uo;^4NEehUKQLX z`cE5zVt~~PK}a#c@uhNG42XNFd@2EW{;t#`lmfW^1ekeS0myo(XOF zOMDqB<5v7ZR0xP2W)NvA02^df0YIwvYXdBxzOd6D2JFAIkR1VF{OO$GD8ThkZE6f) z3jKn8GX+rkW8T;qK>Q09L^B5nd*M+z2Vj4xY|a6AUMeK>0O6N@>ClI>VUR#Tn1Zb5 z0X$^C-)ZT`&wDh$g8Pa90Rawg@Q)Hn4$Arfi5ag;fe9jA1R#_CCU3^v-zPwXfEWbd zbs_x+F$6>i2x9@j^1_o9W(6u7+(}+=%mku;5iN53;aSRw1p2T5Kz>m#Y5_n9$Q1d5 zaTI;^BJG7yrs=gTBKU<{CO!lN<-ZsuzI!1Dp)3MOUYLNhM-T#5wf-<+)jaLsY;?4gsOX0|7zsZ+X^#NY57m*e|4HdMTgo zUqL{igUtzU@s9!-^+ySXXmpV5(qAP`V3DXs!O=r|iNRCmAH)z40vUfeM=Sv_UwV?c z1n!AQ^&jZy(qDO#Y!oIR!6A+&z{aQe7lWzQKk}kCF#c;fzpDXsL@;?Xcv%0ZC%b>h zuVYBk zfJ9dS%r8{3SwX`AU~>`iLO>Ays~73)pqLc^{fh;fR=^g}QU%Sd0A4b1gAi5$gis|0 zkRVwDNaFva^}ixO{fjA-AINbP!1f{<|8sWt2{d@Xz*Ygf(?3c@AgFy6K>H$k4=f3C z8U(urU?%_V@7KB>$5vop#{v%;c%=VPY@#4Rh*6NF|3@9_hU$?r@F=x|o6-EE0a$bz zC}R!4^}-mZ&_{9j81Rtce{@01@ z*TE*V{0@p-|GOBp{RYI%fmk-ciOnw80r)T7#n=EtOE!K(|11Y9m4}NO;FUcL83Ka- zA4)+$ygB|&g$Fu0g8Z`{yfA15Gk}}}&20c^U#xu1`eP0j1?+cVU;p>|5C{VTXOR8l z2LB4E@-HdfuYObEfPyvw*e?RoHo+QBGyH~PgFZ34D}kn$D)3q-W_ zccT33()GW1^YZ;BWqe@^+yb}d7XHl^6I8whAo!#GKX38q zMX*QUas00*N1Oj4mD&d2ypaCq*Z}PD|DKE4!GB0&w*lBZzuoPhGQk(e|4}*<0T7PP zR_?6-^@=|a-}!Cu2yB9`w*e$Cj@1@yZ7mLffJpp5e*Ovs04XJdhhK3A@Dkz%WVi#K zSJBBYU?NcR4gmRu1=b^DmI_#QDELSA4;>&Nj8p%zSl$6(y>za(h&K)wtZD#wg8aL4 zr>P*_U9dE!UBFAcHIUFQfDn|o3pTjzE`aD|7$U(Sz)%)wbq|0GYTE_izCdSxL;b(~ zfr7UHe=Oc!+8&+;)&?9L?SHf)%Kk&D2j=pkRQab2N?b760N4)y?!eVw(vfTc(*K#c zXZKVn*5GDn;OF7LC%bI!ZwX8<+M(}*wL8uG4Il=M<^jM7lfk=`7nJdDlz09gZ4&nX z&c)*2(6@zupy2%n-V5#BZ|KjzP?m$gnrFJ#3HX6KSqA?7`_UO*^oN1(0RZ!*lmFSI zfJw>0zyB7I;y>4*#we z{lB3arGIFZjsP$qs3QQ*OK*sdz^Leo|DfnzpgzB$>^1*^!hq0f!1iiB`fIO&-*~Q; zf8e2y0R%5i$&bOP%(j1^z`G<^kkRqqrVhXHMZ^EV7aaf9rQ3s;O2 zFsfwqA1IlVzlJdSjiUVZ4^;BW-)CLpZ&bw8KTwua;MEBJ^zXwxem3!o9@uZbfak%# v2W@fYPb1mWzfz1(!K_^7{y^pC0Lb8Ua4^5OUl8)(f6w48%EKJkX(9d}J3g}x From 94298d90dafad13c97292f93ecf89c575c017119 Mon Sep 17 00:00:00 2001 From: Scott Lystig 
From: Scott Lystig Fritchie
Date: Fri, 10 Apr 2015 22:41:22 +0900
Subject: [PATCH 22/22] WIP: transitions into & out of inner proj nesting are
 problems, yo!

---
 src/machi_chain_manager1.erl | 44 +++++++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 6 deletions(-)

diff --git a/src/machi_chain_manager1.erl b/src/machi_chain_manager1.erl
index d17ff4a..90ca727 100644
--- a/src/machi_chain_manager1.erl
+++ b/src/machi_chain_manager1.erl
@@ -903,9 +903,25 @@ io:format(user, "<--x=~w-oooo-~w-~w-~w->", [X, MyName, P_newprop_flap_count,Flap
 react_to_env_C100(P_newprop, P_latest,
                   #ch_mgr{name=MyName, proj=P_current}=S) ->
     ?REACT(c100),
+
+    %% TODO 2015-04-10
+    %% OK, well, we need to be checking sanity on inner projections here,
+    %% but how to do it is still a bit of a mystery.
+    %%
+    %% If the *Y bindings are identical to incoming args, then we aren't
+    %% checking at all.  That's bad, but we don't go into Infinite Loops of
+    %% ReallyReallyBad.
+
+    P_newpropY = P_newprop,
+    P_latestY = P_latest,
+    P_currentY = P_current,
+    %% P_newpropY = inner_projection_or_self(P_newprop),
+    %% P_latestY = inner_projection_or_self(P_latest),
+    %% P_currentY = inner_projection_or_self(P_current),
+
     I_am_UPI_in_newprop_p = lists:member(MyName, P_newprop#projection_v1.upi),
     I_am_Repairing_in_latest_p = lists:member(MyName,
-                                              P_latest#projection_v1.repairing),
+                                     P_latest#projection_v1.repairing),
     ShortCircuit_p =
         P_latest#projection_v1.epoch_number > P_current#projection_v1.epoch_number
         andalso
@@ -913,9 +929,18 @@ react_to_env_C100(P_newprop, P_latest,
         andalso
         I_am_Repairing_in_latest_p,
 
-    case {ShortCircuit_p, projection_transition_is_sane(P_current, P_latest,
-                                                        MyName)} of
-        _ when P_current#projection_v1.epoch_number =< 0 ->
+    Current_sane_p = projection_transition_is_sane(P_current, P_latest,
+                                                   MyName),
+    Inner_sane_p =
+        if P_currentY == P_current, P_latestY == P_latest ->
+                true;
+           true ->
+                projection_transition_is_sane(P_currentY, P_latestY, MyName)
+        end,
+
+    case {ShortCircuit_p, Current_sane_p} of
+        _ when P_current#projection_v1.epoch_number == 0 ->
+            %% Epoch == 0 is reserved for first-time, just booting conditions.
             ?REACT({c100, ?LINE, [first_write]}),
             react_to_env_C110(P_latest, S);
         {true, _} ->
@@ -924,14 +949,21 @@ react_to_env_C100(P_newprop, P_latest,
             %% am/should be repairing.  We ignore our proposal and try
             %% to go with the latest.
             ?REACT({c100, ?LINE, [repairing_short_circuit]}),
+            if Inner_sane_p == false -> io:format(user, "QQQ line ~p false\n", [?LINE]), timer:sleep(500); true -> ok end,
             react_to_env_C110(P_latest, S);
-        {_, true} ->
+        {_, true} when Inner_sane_p ->
             ?REACT({c100, ?LINE, [sane]}),
+            if Inner_sane_p == false -> io:format(user, "QQQ line ~p false\n", [?LINE]), timer:sleep(500); true -> ok end,
             react_to_env_C110(P_latest, S);
         {_, _AnyOtherReturnValue} ->
-            %% P_latest is not sane.
+            %% P_latest is not sane or else P_latestY is not sane.
             %% By process of elimination, P_newprop is best,
             %% so let's write it.
+io:format(user, "\nUrp: ~p ~p ~p ~p\n", [MyName, ShortCircuit_p, _AnyOtherReturnValue, Inner_sane_p]),
+io:format(user, "c100 P_newprop : ~w\n", [machi_projection:make_summary(P_newprop)]),
+io:format(user, "c100 P_newpropY: ~w\n", [machi_projection:make_summary(P_newpropY)]),
+io:format(user, "c100 P_latest : ~w\n", [machi_projection:make_summary(P_latest)]),
+io:format(user, "c100 P_latestY: ~w\n", [machi_projection:make_summary(P_latestY)]),
             ?REACT({c100, ?LINE, [not_sane]}),
             react_to_env_C300(P_newprop, P_latest, S)
     end.
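
For reference, the commented-out *Y bindings in the patch above call a helper
named inner_projection_or_self/1.  A minimal sketch of one plausible shape for
that helper follows; it assumes, purely for illustration, that a nested
projection is stashed under an inner_projection key in a proplist field named
dbg inside the #projection_v1{} record, which the patch itself does not
establish.

%% Sketch only -- not part of the patch above.  Assumes the #projection_v1{}
%% record carries a proplist field 'dbg' and that a nested projection, when
%% present, is stored there under the key 'inner_projection'.
inner_projection_or_self(P) ->
    case proplists:get_value(inner_projection, P#projection_v1.dbg) of
        undefined -> P;          %% no nested projection: use P unchanged
        P_inner   -> P_inner     %% unwrap and return the nested projection
    end.

With a helper of that shape, enabling the commented-out bindings would let
react_to_env_C100 run projection_transition_is_sane/3 against the nested
projections whenever they exist, which is exactly the gap the TODO comment in
the patch describes.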