%% -------------------------------------------------------------------
%%
%% Machi: a small village of replicated files
%%
%% Copyright (c) 2014-2015 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
%% @doc The Machi chain manager, Guardian of all things related to
%% Chain Replication state, status, and data replica safety.
%%
%% The Chain Manager is responsible for managing Machi's "Chain
%% Replication" state. This role is roughly analogous to the
%% "Riak Core" application inside of Riak, which takes care of
%% coordinating replica placement and replica repair.
%%
%% For each primitive data server in the cluster, a Machi FLU, there
%% is a Chain Manager process that manages its FLU's role within the
%% Machi cluster's Chain Replication scheme. Each Chain Manager
%% process executes locally and independently to manage the
%% distributed state of a single Machi Chain Replication chain.
%%
%% Machi's Chain Manager process performs similar tasks as Riak Core's
%% claimant. However, Machi has several active Chain Manager
%% processes, one per FLU server, instead of a single active process
%% like Core's claimant. Each Chain Manager process acts
%% independently; each is constrained so that it will reach consensus
%% via independent computation & action.
-module(machi_chain_manager1).

%% TODO: I am going to sever the connection between the flowchart and the
%% code. That diagram is really valuable, but it also takes a long time
%% to make any kind of edit; the process is too slow. This is a TODO
%% item, a reminder that the flowchart is important documentation and
%% must be brought back into sync with the code soon.

-behaviour(gen_server).

-include("machi_projection.hrl").
-include("machi_chain_manager.hrl").

-record(ch_mgr, {
          name            :: pv1_server(),
          flap_limit      :: non_neg_integer(),
          proj            :: projection(),
          %%
          timer           :: 'undefined' | timer:tref(),
          ignore_timer    :: boolean(),
          proj_history    :: queue:queue(),
          flaps=0         :: integer(),
          flap_start = ?NOT_FLAPPING
                          :: erlang:timestamp(),
          repair_worker   :: 'undefined' | pid(),
          repair_start    :: 'undefined' | erlang:timestamp(),
          repair_final_status :: 'undefined' | term(),
          runenv          :: list(), %proplist()
          opts            :: list(), %proplist()
          members_dict    :: p_srvr_dict(),
          proxies_dict    :: orddict:orddict()
         }).

-define(D(X), io:format(user, "~s ~p\n", [??X, X])).
-define(Dw(X), io:format(user, "~s ~w\n", [??X, X])).

-define(FLU_PC, machi_proxy_flu1_client).
-define(TO, (2*1000)).                          % default timeout

%% Keep a history of our flowchart execution in the process dictionary.
-define(REACT(T), put(react, [T|get(react)])).

%% Define the period of private projection stability before we'll
%% start repair.
-ifdef(TEST).
-define(REPAIR_START_STABILITY_TIME, 3).
-else. % TEST
-define(REPAIR_START_STABILITY_TIME, 10).
-endif. % TEST

-define(RETURN2(X), begin put(why2, [?LINE|get(why2)]), X end).

%% API
-export([start_link/2, start_link/3, stop/1, ping/1,
         set_chain_members/2, set_active/2]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
         terminate/2, code_change/3]).

-export([make_chmgr_regname/1, projection_transitions_are_sane/2,
         inner_projection_exists/1, inner_projection_or_self/1,
         simple_chain_state_transition_is_sane/3,
         simple_chain_state_transition_is_sane/5,
         chain_state_transition_is_sane/5]).
%% Exports so that EDoc docs are generated for these internal funcs.
-export([mk/3]).

-ifdef(TEST).

-export([test_calc_projection/2,
         test_write_public_projection/2,
         test_read_latest_public_projection/2,
         test_react_to_env/1,
         get_all_hosed/1]).

-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-endif.
-ifdef(PULSE).
-compile({parse_transform, pulse_instrument}).
-endif.

-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
-endif. %TEST

start_link(MyName, MembersDict) ->
    start_link(MyName, MembersDict, []).

start_link(MyName, MembersDict, MgrOpts) ->
    gen_server:start_link({local, make_chmgr_regname(MyName)}, ?MODULE,
                          {MyName, MembersDict, MgrOpts}, []).

stop(Pid) ->
    gen_server:call(Pid, {stop}, infinity).

ping(Pid) ->
    gen_server:call(Pid, {ping}, infinity).

%% @doc Set the chain members list.
%%
%% NOTE: This implementation is a bit brittle, in that an author with
%% higher rank may try to re-suggest the old membership list if it
%% races with an author of lower rank. For now, we suggest calling
%% set_chain_members() first on the author of highest rank and finishing
%% with the lowest rank, i.e. name z* first, name a* last.

set_chain_members(Pid, MembersDict) ->
    gen_server:call(Pid, {set_chain_members, MembersDict}, infinity).

set_active(Pid, Boolean) when Boolean == true; Boolean == false ->
    gen_server:call(Pid, {set_active, Boolean}, infinity).
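
%% Example usage sketch (illustrative, not part of the original source):
%% bringing up a two-member chain [a,b]. Construction of MembersDict is
%% elided here; it maps each server name to its #p_srvr{} record. Per
%% the NOTE above, call set_chain_members() on the higher-ranked
%% manager (b) before the lower-ranked one (a).
%%
%%     {ok, MgrA} = machi_chain_manager1:start_link(a, MembersDict, []),
%%     {ok, MgrB} = machi_chain_manager1:start_link(b, MembersDict, []),
%%     ok = machi_chain_manager1:set_chain_members(MgrB, MembersDict),
%%     ok = machi_chain_manager1:set_chain_members(MgrA, MembersDict),
%%     ok = machi_chain_manager1:set_active(MgrA, true),
%%     ok = machi_chain_manager1:set_active(MgrB, true),
%%     pong = machi_chain_manager1:ping(MgrA).
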
-ifdef(TEST).

%% Test/debugging code only.

test_write_public_projection(Pid, Proj) ->
    gen_server:call(Pid, {test_write_public_projection, Proj}, infinity).

%% Calculate a projection and return it to us.
%% If KeepRunenvP is true, the server will retain its change in its
%% runtime environment, e.g., changes in simulated network partitions.
test_calc_projection(Pid, KeepRunenvP) ->
    gen_server:call(Pid, {test_calc_projection, KeepRunenvP}, infinity).

test_read_latest_public_projection(Pid, ReadRepairP) ->
    gen_server:call(Pid, {test_read_latest_public_projection, ReadRepairP},
                    infinity).

test_react_to_env(Pid) ->
    gen_server:call(Pid, {test_react_to_env}, infinity).

-endif. % TEST

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Bootstrapping is a hassle ... when isn't it?
%%
%% If InitMembersDict == [], then we don't know anything about the chain
%% that we'll be participating in. We'll have to wait for directions from
%% our sysadmin later.
%%
%% If InitMembersDict /= [], then we do know what chain we're
%% participating in. It's probably test code, since that's about the
%% only time that we know so much at init() time.
%%
%% In either case, we'll try to create & store an epoch 0 projection
%% and store it to both projection stores. This is tricky if
%% InitMembersDict == [] because InitMembersDict usually contains the
%% #p_srvr records that we need to *write* to the projection store,
%% even our own private store! For test code, we get the store
%% manager's pid in MgrOpts and use direct gen_server calls to the
%% local projection store.
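%%
%% Example MgrOpts sketch (illustrative, not from the original source).
%% These keys are among the ones consulted by init/1 below; the values
%% shown here are hypothetical test settings, and the registered store
%% name is made up for the example.
%%
%%     MgrOpts = [{active_mode, true},
%%                {use_partition_simulator, false},
%%                %% Only supplied by test code that runs its own local
%%                %% projection store process:
%%                {projection_store_registered_name, machi_flu_a_pstore}],
%%     {ok, Mgr} = machi_chain_manager1:start_link(a, InitMembersDict, MgrOpts).
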
init({MyName, InitMembersDict, MgrOpts}) ->
    random:seed(now()),
    init_remember_partition_hack(),
    ZeroAll_list = [P#p_srvr.name || {_, P} <- orddict:to_list(InitMembersDict)],
    ZeroProj = make_none_projection(MyName, ZeroAll_list, InitMembersDict),
    ok = store_zeroth_projection_maybe(ZeroProj, MgrOpts),

    {MembersDict, Proj} =
        get_my_private_proj_boot_info(MgrOpts, InitMembersDict, ZeroProj),
    All_list = [P#p_srvr.name || {_, P} <- orddict:to_list(MembersDict)],
    Opt = fun(Key, Default) -> proplists:get_value(Key, MgrOpts, Default) end,
    RunEnv = [{seed, Opt(seed, now())},
              {use_partition_simulator, Opt(use_partition_simulator, false)},
              {network_partitions, Opt(network_partitions, [])},
              {network_islands, Opt(network_islands, [])},
              {up_nodes, Opt(up_nodes, not_init_yet)}],
    ActiveP = Opt(active_mode, true),
    S = #ch_mgr{name=MyName,
                proj=Proj,
                %% TODO 2015-03-04: revisit, should this constant be bigger?
                %% Yes, this should be bigger, but it's a hack. There is
                %% no guarantee that all parties will advance to a minimum
                %% flap awareness in the amount of time that this mgr will.
                flap_limit=length(All_list) + 50,
                timer='undefined',
                proj_history=queue:new(),
                runenv=RunEnv,
                opts=MgrOpts},
    {_, S2} = do_set_chain_members_dict(MembersDict, S),
    S3 = if ActiveP == false ->
                 S2;
            ActiveP == true ->
                 set_active_timer(S2)
         end,
    {ok, S3}.

handle_call({ping}, _From, S) ->
    {reply, pong, S};
handle_call({set_chain_members, MembersDict}, _From,
            #ch_mgr{name=MyName,
                    proj=#projection_v1{all_members=OldAll_list,
                                        epoch_number=OldEpoch,
                                        upi=OldUPI}=OldProj}=S) ->
    {Reply, S2} = do_set_chain_members_dict(MembersDict, S),
    %% TODO: should there be any additional sanity checks? Right now,
    %% if someone does something bad, then do_react_to_env() will
    %% crash, which will crash us, and we'll restart in a sane & old
    %% config.
    All_list = [P#p_srvr.name || {_, P} <- orddict:to_list(MembersDict)],
    MissingInNew = OldAll_list -- All_list,
    NewUPI = OldUPI -- MissingInNew,
    NewDown = All_list -- NewUPI,
    NewEpoch = OldEpoch + 1111,
    NewProj = machi_projection:update_checksum(
                OldProj#projection_v1{author_server=MyName,
                                      creation_time=now(),
                                      epoch_number=NewEpoch,
                                      all_members=All_list,
                                      upi=NewUPI,
                                      repairing=[],
                                      down=NewDown,
                                      members_dict=MembersDict}),
    %% Reset all flapping state.
    NewProj2 = NewProj#projection_v1{flap=make_flapping_i()},
    S3 = S2#ch_mgr{proj=NewProj2,
                   proj_history=queue:new()},
    {_QQ, S4} = do_react_to_env(S3),
    {reply, Reply, S4};
handle_call({set_active, Boolean}, _From, #ch_mgr{timer=TRef}=S) ->
    case {Boolean, TRef} of
        {true, undefined} ->
            S2 = set_active_timer(S),
            {reply, ok, S2};
        {false, _} ->
            (catch timer:cancel(TRef)),
            {reply, ok, S#ch_mgr{timer=undefined}};
        _ ->
            {reply, error, S}
    end;
handle_call({stop}, _From, S) ->
    {stop, normal, ok, S};
handle_call({test_calc_projection, KeepRunenvP}, _From,
            #ch_mgr{name=MyName}=S) ->
    RelativeToServer = MyName,
    {P, S2} = calc_projection(S, RelativeToServer),
    {reply, {ok, P}, if KeepRunenvP -> S2;
                        true        -> S
                     end};
handle_call({test_write_public_projection, Proj}, _From, S) ->
    {Res, S2} = do_cl_write_public_proj(Proj, S),
    {reply, Res, S2};
handle_call({test_read_latest_public_projection, ReadRepairP}, _From, S) ->
    {Perhaps, Val, ExtraInfo, S2} =
        do_cl_read_latest_public_projection(ReadRepairP, S),
    Res = {Perhaps, Val, ExtraInfo},
    {reply, Res, S2};
handle_call({test_react_to_env}, _From, S) ->
    {TODOtodo, S2} = do_react_to_env(S),
    {reply, TODOtodo, S2};
handle_call(_Call, _From, S) ->
    {reply, whaaaaaaaaaa, S}.

handle_cast(_Cast, S) ->
    ?D({cast_whaaaaaaaaaaa, _Cast}),
    {noreply, S}.

handle_info(tick_check_environment, #ch_mgr{ignore_timer=true}=S) ->
    {noreply, S};
handle_info(tick_check_environment, S) ->
    {{_Delta, Props, _Epoch}, S1} = do_react_to_env(S),
    S2 = sanitize_repair_state(S1),
    S3 = perhaps_start_repair(S2),
    case proplists:get_value(throttle_seconds, Props) of
        N when is_integer(N), N > 0 ->
            %% We are flapping. Set ignore_timer=true and schedule a
            %% reminder to stop ignoring. This slows down the rate of
            %% flapping. If/when the yo:tell_author_yo() function in
            %% state C200 is ever implemented, then it should be
            %% implemented via the test_react_to_env style.
            erlang:send_after(N*1000, self(), stop_ignoring_timer),
            {noreply, S3#ch_mgr{ignore_timer=true}};
        _ ->
            {noreply, S3}
    end;
handle_info(stop_ignoring_timer, S) ->
    {noreply, S#ch_mgr{ignore_timer=false}};
handle_info({'DOWN', _Ref, process, Worker, Res},
            #ch_mgr{repair_worker=Worker}=S) ->
    {noreply, S#ch_mgr{ignore_timer=false,
                       repair_worker=undefined,
                       repair_final_status=Res}};
handle_info(Msg, S) ->
    case get(todo_bummer) of undefined -> io:format("TODO: got ~p\n", [Msg]);
                             _         -> ok
    end,
    put(todo_bummer, true),
    {noreply, S}.

terminate(_Reason, _S) ->
    ok.

code_change(_OldVsn, S, _Extra) ->
    {ok, S}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
make_none_projection(MyName, All_list, MembersDict) ->
    Down_list = All_list,
    UPI_list = [],
    machi_projection:new(MyName, MembersDict, UPI_list, Down_list, [], []).

get_my_private_proj_boot_info(MgrOpts, DefaultDict, DefaultProj) ->
    get_my_proj_boot_info(MgrOpts, DefaultDict, DefaultProj, private).

get_my_public_proj_boot_info(MgrOpts, DefaultDict, DefaultProj) ->
    get_my_proj_boot_info(MgrOpts, DefaultDict, DefaultProj, public).

get_my_proj_boot_info(MgrOpts, DefaultDict, DefaultProj, ProjType) ->
    case proplists:get_value(projection_store_registered_name, MgrOpts) of
        undefined ->
            {DefaultDict, DefaultProj};
        Store ->
            {ok, P} = machi_projection_store:read_latest_projection(Store,
                                                                    ProjType),
            {P#projection_v1.members_dict, P}
    end.

%% Write the epoch 0 projection to the projection stores, to assist
%% bootstrapping. If the 0th epoch is already written, there's no problem.

store_zeroth_projection_maybe(ZeroProj, MgrOpts) ->
    case proplists:get_value(projection_store_registered_name, MgrOpts) of
        undefined ->
            ok;
        Store ->
            _ = machi_projection_store:write(Store, public, ZeroProj),
            _ = machi_projection_store:write(Store, private, ZeroProj),
            ok
    end.

set_active_timer(#ch_mgr{name=MyName, members_dict=MembersDict}=S) ->
    FLU_list = [P#p_srvr.name || {_, P} <- orddict:to_list(MembersDict)],
    %% Perturb the order a little bit, to avoid near-lock-step
    %% operations every few ticks.
    MSec = calc_sleep_ranked_order(400, 1500, MyName, FLU_list) +
        random:uniform(100),
    {ok, TRef} = timer:send_interval(MSec, tick_check_environment),
    S#ch_mgr{timer=TRef}.

do_cl_write_public_proj(Proj, S) ->
    #projection_v1{epoch_number=Epoch} = Proj,
    cl_write_public_proj(Epoch, Proj, S).

cl_write_public_proj(Epoch, Proj, S) ->
    cl_write_public_proj(Epoch, Proj, false, S).

cl_write_public_proj_skip_local_error(Epoch, Proj, S) ->
    cl_write_public_proj(Epoch, Proj, true, S).

cl_write_public_proj(Epoch, Proj, SkipLocalWriteErrorP, S) ->
    %% Write to local public projection store first, and if it succeeds,
    %% then write to all remote public projection stores.
    cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP, S).

cl_write_public_proj_local(Epoch, Proj, SkipLocalWriteErrorP,
                           #ch_mgr{name=MyName}=S) ->
    {_UpNodes, Partitions, S2} = calc_up_nodes(S),
    Res0 = perhaps_call_t(
             S, Partitions, MyName,
             fun(Pid) -> ?FLU_PC:write_projection(Pid, public, Proj, ?TO) end),
    Continue = fun() ->
                       FLUs = Proj#projection_v1.all_members -- [MyName],
                       cl_write_public_proj_remote(FLUs, Partitions, Epoch, Proj, S)
               end,
    case Res0 of
        ok ->
            {XX, SS} = Continue(),
            {{local_write_result, ok, XX}, SS};
        Else when SkipLocalWriteErrorP ->
            {XX, SS} = Continue(),
            {{local_write_result, Else, XX}, SS};
        Else ->
            {Else, S2}
    end.

cl_write_public_proj_remote(FLUs, Partitions, _Epoch, Proj, S) ->
    %% We're going to be very care-free about this write because we'll rely
    %% on the read side to do any read repair.
    DoIt = fun(Pid) -> ?FLU_PC:write_projection(Pid, public, Proj, ?TO) end,
    Rs = [{FLU, perhaps_call_t(S, Partitions, FLU, fun(Pid) -> DoIt(Pid) end)} ||
             FLU <- FLUs],
    {{remote_write_results, Rs}, S}.

do_cl_read_latest_public_projection(ReadRepairP,
                                    #ch_mgr{proj=Proj1}=S) ->
    _Epoch1 = Proj1#projection_v1.epoch_number,
    case cl_read_latest_projection(public, S) of
        {needs_repair, FLUsRs, Extra, S3} ->
            if not ReadRepairP ->
                    {not_unanimous, todoxyz, [{unanimous_flus, []},
                                              {results, FLUsRs}|Extra], S3};
               true ->
                    {_Status, S4} = do_read_repair(FLUsRs, Extra, S3),
                    do_cl_read_latest_public_projection(ReadRepairP, S4)
            end;
        {_UnanimousTag, _Proj2, _Extra, _S3}=Else ->
            Else
    end.

read_latest_projection_call_only(ProjectionType, AllHosed,
                                 #ch_mgr{proj=CurrentProj}=S) ->
    #projection_v1{all_members=All_list} = CurrentProj,
    All_queried_list = All_list -- AllHosed,

    {_UpNodes, Partitions, S2} = calc_up_nodes(S),
    DoIt = fun(Pid) ->
                   case (?FLU_PC:read_latest_projection(Pid, ProjectionType, ?TO)) of
                       {ok, P} -> P;
                       Else    -> Else
                   end
           end,
    Rs = [perhaps_call_t(S, Partitions, FLU, fun(Pid) -> DoIt(Pid) end) ||
             FLU <- All_queried_list],
    %% Rs = [perhaps_call_t(S, Partitions, FLU, fun(Pid) -> DoIt(Pid) end) ||
    %%          FLU <- All_queried_list],
    FLUsRs = lists:zip(All_queried_list, Rs),
    {All_queried_list, FLUsRs, S2}.

cl_read_latest_projection(ProjectionType, S) ->
    AllHosed = [],
    cl_read_latest_projection(ProjectionType, AllHosed, S).

cl_read_latest_projection(ProjectionType, AllHosed, S) ->
    {All_queried_list, FLUsRs, S2} =
        read_latest_projection_call_only(ProjectionType, AllHosed, S),

    rank_and_sort_projections_with_extra(All_queried_list, FLUsRs,
                                         ProjectionType, S2).

rank_and_sort_projections_with_extra(All_queried_list, FLUsRs, ProjectionType,
                                     #ch_mgr{name=MyName, proj=CurrentProj}=S) ->
    UnwrittenRs = [x || {_, {error, not_written}} <- FLUsRs],
    Ps = [Proj || {_FLU, Proj} <- FLUsRs, is_record(Proj, projection_v1)],
    BadAnswerFLUs = [FLU || {FLU, Answer} <- FLUsRs,
                            not is_record(Answer, projection_v1)],

    if All_queried_list == []
       orelse
       length(UnwrittenRs) == length(FLUsRs) ->
            NoneProj = make_none_projection(MyName, [], orddict:new()),
            Extra2 = [{all_members_replied, true},
                      {all_queried_list, All_queried_list},
                      {flus_rs, FLUsRs},
                      {unanimous_flus, []},
                      {not_unanimous_flus, []},
                      {bad_answer_flus, BadAnswerFLUs},
                      {not_unanimous_answers, []},
                      {trans_all_hosed, []},
                      {trans_all_flap_counts, []}],
            {not_unanimous, NoneProj, Extra2, S};
       ProjectionType == public, UnwrittenRs /= [] ->
            {needs_repair, FLUsRs, [flarfus], S};
       true ->
            [{_Rank, BestProj}|_] = rank_and_sort_projections(Ps, CurrentProj),
            NotBestPs = [Proj || Proj <- Ps, Proj /= BestProj],
            UnanimousTag = if NotBestPs == [] -> unanimous;
                              true            -> not_unanimous
                           end,
            Extra = [{all_members_replied, length(FLUsRs) == length(All_queried_list)}],
            Best_FLUs = [FLU || {FLU, Projx} <- FLUsRs, Projx == BestProj],
            TransAllHosed = lists:usort(
                              lists:flatten([get_all_hosed(P) || P <- Ps])),
            AllFlapCounts = merge_flap_counts([get_all_flap_counts(P) ||
                                                  P <- Ps]),
            Extra2 = [{all_queried_list, All_queried_list},
                      {flus_rs, FLUsRs},
                      {unanimous_flus, Best_FLUs},
                      {not_unanimous_flus, All_queried_list --
                                               (Best_FLUs ++ BadAnswerFLUs)},
                      {bad_answer_flus, BadAnswerFLUs},
                      {not_unanimous_answers, NotBestPs},
                      {trans_all_hosed, TransAllHosed},
                      {trans_all_flap_counts, AllFlapCounts}|Extra],
            {UnanimousTag, BestProj, Extra2, S}
    end.

do_read_repair(FLUsRs, _Extra, #ch_mgr{proj=CurrentProj}=S) ->
    Unwrittens = [x || {_FLU, {error, not_written}} <- FLUsRs],
    Ps = [Proj || {_FLU, Proj} <- FLUsRs, is_record(Proj, projection_v1)],
    if Unwrittens == [] orelse Ps == [] ->
            {nothing_to_do, S};
       true ->
            %% We have at least one unwritten and also at least one proj.
            %% Pick the best one, then spam it everywhere.
            [{_Rank, BestProj}|_] = rank_and_sort_projections(Ps, CurrentProj),
            Epoch = BestProj#projection_v1.epoch_number,

            %% We're doing repair, so use the flavor that will
            %% continue to all others even if there is an
            %% error_written on the local FLU.
            {_DontCare, _S2}=Res = cl_write_public_proj_skip_local_error(
                                     Epoch, BestProj, S),
            Res
    end.

calc_projection(S, RelativeToServer) ->
    calc_projection(S, RelativeToServer, []).

calc_projection(#ch_mgr{proj=LastProj, runenv=RunEnv}=S,
                RelativeToServer, AllHosed) ->
    Dbg = [],
    OldThreshold = proplists:get_value(old_threshold, RunEnv),
    NoPartitionThreshold = proplists:get_value(no_partition_threshold, RunEnv),
    calc_projection(OldThreshold, NoPartitionThreshold, LastProj,
                    RelativeToServer, AllHosed, Dbg, S).

%% OldThreshold: Percent chance of using the old/previous network partition list
%% NoPartitionThreshold: If the network partition changes, what percent chance
%%                       that there are no partitions at all?
%% AllHosed: FLUs that we must treat as if they are down, e.g., we are
%%           in a flapping situation and wish to ignore FLUs that we
%%           believe are bad-behaving causes of our flapping.
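%%
%% Illustrative sketch of the AllHosed argument (not from the original
%% source); 'a' and 'b' are hypothetical FLU names:
%%
%%     {P, _S2} = calc_projection(S, a, [b]),
%%     %% 'b' is subtracted from the up list below, so it can appear only
%%     %% in P#projection_v1.down, never in upi or repairing.
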
calc_projection(_OldThreshold, _NoPartitionThreshold, LastProj,
                RelativeToServer, AllHosed, Dbg,
                #ch_mgr{name=MyName,
                        runenv=RunEnv1,
                        repair_final_status=RepairFS}=S) ->
    #projection_v1{epoch_number=OldEpochNum,
                   members_dict=MembersDict,
                   upi=OldUPI_list,
                   repairing=OldRepairing_list
                  } = LastProj,
    LastUp = lists:usort(OldUPI_list ++ OldRepairing_list),
    AllMembers = (S#ch_mgr.proj)#projection_v1.all_members,
    {Up0, Partitions, RunEnv2} = calc_up_nodes(MyName,
                                               AllMembers, RunEnv1),
    Up = Up0 -- AllHosed,
    NewUp = Up -- LastUp,
    Down = AllMembers -- Up,

    NewUPI_list = [X || X <- OldUPI_list, lists:member(X, Up)],
    LastInNewUPI = case NewUPI_list of
                       []    -> does_not_exist_because_upi_is_empty;
                       [_|_] -> lists:last(NewUPI_list)
                   end,
    Repairing_list2 = [X || X <- OldRepairing_list, lists:member(X, Up)],
    Simulator_p = proplists:get_value(use_partition_simulator, RunEnv2, false),
    {NewUPI_list3, Repairing_list3, RunEnv3} =
        case {NewUp, Repairing_list2} of
            {[], []} ->
                D_foo=[],
                {NewUPI_list, [], RunEnv2};
            {[], [H|T]} when RelativeToServer == LastInNewUPI ->
                %% The author is the tail of the UPI list. Let's see if
                %% *everyone* in the UPI+repairing lists is using our
                %% projection. This is to simulate a requirement that a
                %% real repair process cannot take place until the chain
                %% is stable, i.e. everyone is in the same epoch.

                %% TODO create a real API call for fetching this info?
                SameEpoch_p = check_latest_private_projections_same_epoch(
                                NewUPI_list ++ Repairing_list2,
                                S#ch_mgr.proj, Partitions, S),
                if Simulator_p andalso SameEpoch_p ->
                        D_foo=[{repair_airquote_done, {we_agree, (S#ch_mgr.proj)#projection_v1.epoch_number}}],
                        {NewUPI_list ++ [H], T, RunEnv2};
                   not Simulator_p
                   andalso
                   RepairFS == {repair_final_status, ok} ->
                        D_foo=[{repair_done, {repair_final_status, ok, (S#ch_mgr.proj)#projection_v1.epoch_number}}],
                        {NewUPI_list ++ Repairing_list2, [], RunEnv2};
                   true ->
                        D_foo=[],
                        {NewUPI_list, OldRepairing_list, RunEnv2}
                end;
            {_, _} ->
                D_foo=[],
                {NewUPI_list, OldRepairing_list, RunEnv2}
        end,
    Repairing_list4 = case NewUp of
                          []    -> Repairing_list3;
                          NewUp -> Repairing_list3 ++ NewUp
                      end,
    Repairing_list5 = Repairing_list4 -- Down,

    TentativeUPI = NewUPI_list3,
    TentativeRepairing = Repairing_list5,

    {NewUPI, NewRepairing} =
        if TentativeUPI == [] andalso TentativeRepairing /= [] ->
                [FirstRepairing|TailRepairing] = TentativeRepairing,
                {[FirstRepairing], TailRepairing};
           true ->
                {TentativeUPI, TentativeRepairing}
        end,

    P = machi_projection:new(OldEpochNum + 1,
                             MyName, MembersDict, Down, NewUPI, NewRepairing,
                             D_foo ++
                                 Dbg ++ [{ps, Partitions}, {nodes_up, Up}]),
    {P, S#ch_mgr{runenv=RunEnv3}}.

check_latest_private_projections_same_epoch(FLUs, MyProj, Partitions, S) ->
    %% NOTE: The caller must provide us with the FLUs list for all
    %%       FLUs that must be up & available right now. So any
    %%       failure of perhaps_call_t() means that we must return
    %%       false.
    FoldFun = fun(_FLU, false) ->
                      false;
                 (FLU, true) ->
                      F = fun(Pid) ->
                                  ?FLU_PC:read_latest_projection(Pid, private, ?TO)
                          end,
                      case perhaps_call_t(S, Partitions, FLU, F) of
                          {ok, RemotePrivateProj} ->
                              if MyProj#projection_v1.epoch_number ==
                                 RemotePrivateProj#projection_v1.epoch_number
                                 andalso
                                 MyProj#projection_v1.epoch_csum ==
                                 RemotePrivateProj#projection_v1.epoch_csum ->
                                      true;
                                 true ->
                                      false
                              end;
                          _ ->
                              false
                      end
              end,
    lists:foldl(FoldFun, true, FLUs).

calc_up_nodes(#ch_mgr{name=MyName, proj=Proj, runenv=RunEnv1}=S) ->
    AllMembers = Proj#projection_v1.all_members,
    {UpNodes, Partitions, RunEnv2} =
        calc_up_nodes(MyName, AllMembers, RunEnv1),
    {UpNodes, Partitions, S#ch_mgr{runenv=RunEnv2}}.

calc_up_nodes(MyName, AllMembers, RunEnv1) ->
    case proplists:get_value(use_partition_simulator, RunEnv1) of
        true ->
            calc_up_nodes_sim(MyName, AllMembers, RunEnv1);
        false ->
            {AllMembers -- get(remember_partition_hack), [], RunEnv1}
    end.

calc_up_nodes_sim(MyName, AllMembers, RunEnv1) ->
    {Partitions2, Islands2} = machi_partition_simulator:get(AllMembers),
    catch ?REACT({calc_up_nodes, ?LINE, [{partitions, Partitions2},
                                         {islands, Islands2}]}),
    UpNodes = lists:sort(
                [Node || Node <- AllMembers,
                         not lists:member({MyName, Node}, Partitions2),
                         not lists:member({Node, MyName}, Partitions2)]),
    RunEnv2 = replace(RunEnv1,
                      [{network_partitions, Partitions2},
                       {network_islands, Islands2},
                       {up_nodes, UpNodes}]),
    {UpNodes, Partitions2, RunEnv2}.

replace(PropList, Items) ->
    Tmp = Items ++ PropList,
    [{K, proplists:get_value(K, Tmp)} || K <- proplists:get_keys(Tmp)].
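
%% Example of replace/2 behavior (illustrative, not from the original
%% source). Because Items is prepended to the proplist, its values win
%% for overlapping keys; note that proplists:get_keys/1 does not
%% guarantee a particular key ordering in the result.
%%
%%     replace([{up_nodes, [a,b,c]}, {seed, Seed}], [{up_nodes, [a,b]}])
%%     %% -> a proplist containing {up_nodes, [a,b]} and {seed, Seed}
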
rank_and_sort_projections([], CurrentProj) ->
    rank_projections([CurrentProj], CurrentProj);
rank_and_sort_projections(Ps, CurrentProj) ->
    Epoch = lists:max([Proj#projection_v1.epoch_number || Proj <- Ps]),
    MaxPs = [Proj || Proj <- Ps,
                     Proj#projection_v1.epoch_number == Epoch],
    %% Sort with highest rank first (custom sort)
    lists:sort(fun({RankA, _}, {RankB, _}) -> RankA > RankB end,
               rank_projections(MaxPs, CurrentProj)).

%% Caller must ensure all Projs are of the same epoch number.
%% If the caller gives us projections with different epochs, we assume
%% that the caller is doing an OK thing.
%%
%% TODO: This implementation currently gives higher rank to the last
%%       member of All_list, which is typically/always/TODO-CLARIFY
%%       sorted. That's fine, but there's a source of unnecessary
%%       churn: during repair, we assume that the head of the chain is
%%       the coordinator of the repair. So any time that the head
%%       makes a repair-related transition, that projection may get
%%       quickly replaced by an identical projection that merely has
%%       higher rank because it's authored by a higher-ranked member.
%%       Worst case, for chain len=4:
%%          E+0: author=a, upi=[a], repairing=[b,c,d]
%%          E+1: author=b, upi=[a], repairing=[b,c,d] (**)
%%          E+2: author=c, upi=[a], repairing=[b,c,d] (**)
%%          E+3: author=d, upi=[a], repairing=[b,c,d] (**)
%%          E+4: author=a, upi=[a,b], repairing=[c,d]
%%          E+5: author=b, upi=[a,b], repairing=[c,d] (**)
%%          E+6: author=c, upi=[a,b], repairing=[c,d] (**)
%%          E+7: author=d, upi=[a,b], repairing=[c,d] (**)
%%          E+... 6 more (**) epochs when c & d finish their repairs.
%%       Ideally, the "(**)" epochs are avoidable churn.
%%       Perhaps this means that we should change the responsibility
%%       for repair management to the highest ranking member of the
%%       UPI_list?
%% TODO Hrrrmmmmm ... what about the TODO comment in A40's A40a clause?
%%      That could perhaps resolve this same problem in a better way?

rank_projections(Projs, CurrentProj) ->
    #projection_v1{all_members=All_list} = CurrentProj,
    MemberRank = orddict:from_list(
                   lists:zip(All_list, lists:seq(1, length(All_list)))),
    N = ?MAX_CHAIN_LENGTH + 1,
    [{rank_projection(Proj, MemberRank, N), Proj} || Proj <- Projs].

rank_projection(#projection_v1{upi=[]}, _MemberRank, _N) ->
    -100;
rank_projection(#projection_v1{author_server=Author,
                               upi=UPI_list,
                               repairing=Repairing_list}, MemberRank, N) ->
    %% It's possible that there's "cross-talk" across projection
    %% stores. For example, we were a chain of [a,b], then the
    %% administrator sets a's members_dict to include only a.
    %% However, b is still running and has written a public projection
    %% suggestion to a, and a has seen it. (Or perhaps b has old
    %% chain information from one/many configurations ago, and its
    %% projection store was not wiped clean, then b was restarted &
    %% begins using its local outdated projection information.)
    %%
    %% Server b is no longer a member of a's MemberRank scheme, so we
    %% need to compensate for this by giving b an extremely low author
    %% ranking.
    AuthorRank = case orddict:find(Author, MemberRank) of
                     {ok, Rank} -> Rank;
                     error      -> -(N*N*N*N)
                 end,
    AuthorRank +
        (N * length(Repairing_list)) +
        (N * N * length(UPI_list)).
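
%% Worked example of the ranking formula above (illustrative, not from
%% the original source). Assuming ?MAX_CHAIN_LENGTH is 64, N = 65, and
%% All_list = [a,b,c] gives MemberRank a=1, b=2, c=3:
%%
%%     upi=[a],   repairing=[b,c], author=a: 1 + 65*2 + 65*65*1 = 4356
%%     upi=[a,b], repairing=[c],   author=a: 1 + 65*1 + 65*65*2 = 8516
%%     upi=[a,b], repairing=[c],   author=b: 2 + 65*1 + 65*65*2 = 8517
%%
%% A longer UPI always dominates, then a longer repairing list, and the
%% author's member rank only breaks ties between otherwise-identical
%% projections (which is the churn described in the TODO above).
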
do_set_chain_members_dict(MembersDict, #ch_mgr{proxies_dict=OldProxiesDict}=S) ->
    _ = ?FLU_PC:stop_proxies(OldProxiesDict),
    ProxiesDict = ?FLU_PC:start_proxies(MembersDict),
    {ok, S#ch_mgr{members_dict=MembersDict,
                  proxies_dict=ProxiesDict}}.

do_react_to_env(#ch_mgr{name=MyName,
                        proj=#projection_v1{epoch_number=Epoch,
                                            members_dict=[]=OldDict}=OldProj,
                        opts=Opts}=S) ->
    %% Read from our local *public* projection store. If some other
    %% chain member has written something there, and if we are a
    %% member of that chain, then we'll adopt that projection and then
    %% start actively humming in that chain.
    {NewMembersDict, NewProj} =
        get_my_public_proj_boot_info(Opts, OldDict, OldProj),
    case orddict:is_key(MyName, NewMembersDict) of
        false ->
            {{empty_members_dict, [], Epoch}, S};
        true ->
            {_, S2} = do_set_chain_members_dict(NewMembersDict, S),
            {{empty_members_dict, [], Epoch},
             S2#ch_mgr{proj=NewProj, members_dict=NewMembersDict}}
    end;
do_react_to_env(S) ->
    put(react, []),
    react_to_env_A10(S).

react_to_env_A10(S) ->
    ?REACT(a10),
    react_to_env_A20(0, S).

react_to_env_A20(Retries, #ch_mgr{name=MyName}=S) ->
    ?REACT(a20),
    init_remember_partition_hack(),
    {UnanimousTag, P_latest, ReadExtra, S2} =
        do_cl_read_latest_public_projection(true, S),
    LastComplaint = get(rogue_server_epoch),
    case orddict:is_key(P_latest#projection_v1.author_server,
                        S#ch_mgr.members_dict) of
        false when P_latest#projection_v1.epoch_number /= LastComplaint ->
            put(rogue_server_epoch, P_latest#projection_v1.epoch_number),
            Rogue = P_latest#projection_v1.author_server,
            error_logger:info_msg("Chain manager ~w found latest public "
                                  "projection ~w has author ~w not a member "
                                  "of our members list ~w. Please check "
                                  "chain membership on this "
                                  "rogue chain manager ~w.\n",
                                  [S#ch_mgr.name,
                                   P_latest#projection_v1.epoch_number,
                                   Rogue,
                                   [K || {K, _} <- orddict:to_list(S#ch_mgr.members_dict)],
                                   Rogue]);
        _ ->
            ok
    end,
    case lists:member(MyName, P_latest#projection_v1.all_members) of
        false when P_latest#projection_v1.epoch_number /= LastComplaint,
                   P_latest#projection_v1.all_members /= [] ->
            put(rogue_server_epoch, P_latest#projection_v1.epoch_number),
            error_logger:info_msg("Chain manager ~p found that the latest "
                                  "public projection ~p, authored by ~p, has "
                                  "a members list ~p that does not include me.\n",
                                  [S#ch_mgr.name,
                                   P_latest#projection_v1.epoch_number,
                                   P_latest#projection_v1.author_server,
                                   P_latest#projection_v1.all_members]);
        _ ->
            ok
    end,

    %% The UnanimousTag isn't quite sufficient for our needs. We need
    %% to determine if *all* of the UPI+Repairing FLUs are members of
    %% the unanimous server replies. All Repairing FLUs should be up
    %% now (because if they aren't then they cannot be repairing), so
    %% all Repairing FLUs have no non-race excuse not to be in UnanimousFLUs.
    UnanimousFLUs = lists:sort(proplists:get_value(unanimous_flus, ReadExtra)),
    UPI_Repairing_FLUs = lists:sort(P_latest#projection_v1.upi ++
                                    P_latest#projection_v1.repairing),
    All_UPI_Repairing_were_unanimous = UPI_Repairing_FLUs == UnanimousFLUs,
    %% TODO: investigate if the condition below is more correct?
    %% All_UPI_Repairing_were_unanimous = (UPI_Repairing_FLUs -- UnanimousFLUs) == [],
    LatestUnanimousP =
        if UnanimousTag == unanimous
           andalso
           All_UPI_Repairing_were_unanimous ->
                ?REACT({a20, ?LINE}),
                true;
           UnanimousTag == unanimous ->
                ?REACT({a20, ?LINE, [{upi_repairing, UPI_Repairing_FLUs},
                                     {unanimous, UnanimousFLUs}]}),
                false;
           UnanimousTag == not_unanimous ->
                ?REACT({a20, ?LINE}),
                false;
           true ->
                exit({badbad, UnanimousTag})
        end,
    react_to_env_A30(Retries, P_latest, LatestUnanimousP, ReadExtra, S2).

react_to_env_A30 ( Retries , P_latest , LatestUnanimousP , _ ReadExtra ,
#ch_mgr { name = MyName , proj = P_current ,
flap_limit = FlapLimit } = S ) - >
? REACT ( a30 ) ,
2015-04-10 12:59:56 +00:00
{ P_newprop1 , S2 } = calc_projection ( S , MyName ) ,
2015-04-14 06:30:24 +00:00
? REACT ( { a30 , ? LINE , [ { current , machi_projection : make_summary ( S #ch_mgr.proj ) } ] } ) ,
2015-04-09 08:13:38 +00:00
? REACT ( { a30 , ? LINE , [ { newprop1 , machi_projection : make_summary ( P_newprop1 ) } ] } ) ,
2015-04-06 05:16:20 +00:00
%% Are we flapping yet?
{ P_newprop2 , S3 } = calculate_flaps ( P_newprop1 , P_current , FlapLimit , S2 ) ,
%% Move the epoch number up ... originally done in C300.
#projection_v1 { epoch_number = Epoch_newprop2 } = P_newprop2 ,
2015-04-13 15:54:38 +00:00
#projection_v1 { epoch_number = Epoch_latest ,
author_server = Author_latest } = P_latest ,
2015-04-06 05:16:20 +00:00
NewEpoch = erlang : max ( Epoch_newprop2 , Epoch_latest ) + 1 ,
P_newprop3 = P_newprop2 #projection_v1 { epoch_number = NewEpoch } ,
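%% For example (illustrative numbers only): if Epoch_newprop2 is 7 and
%% Epoch_latest is 9, then NewEpoch = erlang:max(7, 9) + 1 = 10, so the
%% proposal we build here is always numbered strictly higher than
%% anything we have calculated or observed so far.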
2015-04-09 08:13:38 +00:00
? REACT ( { a30 , ? LINE , [ { newprop3 , machi_projection : make_summary ( P_newprop3 ) } ] } ) ,
2015-04-06 05:16:20 +00:00
{ P_newprop10 , S10 } =
case get_flap_count ( P_newprop3 ) of
{ _ , P_newprop3_flap_count } when P_newprop3_flap_count > = FlapLimit - >
2015-04-13 15:54:38 +00:00
AllHosed = get_all_hosed ( P_newprop3 ) ,
2015-04-06 05:16:20 +00:00
{ P_i , S_i } = calc_projection ( S3 , MyName , AllHosed ) ,
P_inner = case lists : member ( MyName , AllHosed ) of
false - >
P_i ;
true - >
2015-04-10 12:59:56 +00:00
P_i #projection_v1 {
upi = [ MyName ] ,
repairing = [ ] ,
down = P_i #projection_v1.all_members
-- [ MyName ] }
2015-04-06 05:16:20 +00:00
end ,
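%% A small illustration with hypothetical values: with
%% all_members=[a,b,c,d] and MyName=a, if a is itself a member of
%% AllHosed then the inner projection built above is upi=[a],
%% repairing=[], down=[b,c,d] (all_members -- [a]): a chain of
%% length one containing only ourselves.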
FinalInnerEpoch =
2015-04-10 12:59:56 +00:00
case inner_projection_exists ( P_current ) of
false - >
2015-06-02 07:26:49 +00:00
FinalCreation = P_newprop3 #projection_v1.creation_time ,
2015-04-06 05:16:20 +00:00
AllFlapCounts_epk =
[ Epk | | { { Epk , _ FlTime } , _ FlCount } < -
get_all_flap_counts ( P_newprop3 ) ] ,
case AllFlapCounts_epk of
[ ] - >
P_newprop3 #projection_v1.epoch_number ;
[ _ | _ ] - >
lists : max ( AllFlapCounts_epk )
end ;
2015-04-10 12:59:56 +00:00
true - >
P_oldinner = inner_projection_or_self ( P_current ) ,
if P_oldinner #projection_v1.upi ==
P_inner #projection_v1.upi
2015-04-06 05:16:20 +00:00
andalso
2015-04-10 12:59:56 +00:00
P_oldinner #projection_v1.repairing ==
P_inner #projection_v1.repairing
2015-04-06 05:16:20 +00:00
andalso
2015-04-10 12:59:56 +00:00
P_oldinner #projection_v1.down ==
P_inner #projection_v1.down - >
2015-06-02 07:26:49 +00:00
FinalCreation = P_oldinner #projection_v1.creation_time ,
2015-04-14 06:30:24 +00:00
P_oldinner #projection_v1.epoch_number ;
2015-04-06 05:16:20 +00:00
true - >
2015-06-02 07:26:49 +00:00
FinalCreation = P_newprop3 #projection_v1.creation_time ,
2015-04-06 05:16:20 +00:00
P_oldinner #projection_v1.epoch_number + 1
end
end ,
2015-04-14 09:19:08 +00:00
%% TODO: When we implement the real chain repair function, we
%% need to keep in mind that an inner projection with more
%% than one up node requires repair there! In the
%% current simulator, repair is never simulated through to
%% completion (and so the UPI list never grows). Fix.
2015-06-02 07:26:49 +00:00
P_inner2 = machi_projection : update_checksum (
P_inner #projection_v1 { epoch_number = FinalInnerEpoch ,
creation_time = FinalCreation } ) ,
2015-06-02 11:55:18 +00:00
InnerInfo = [ { inner_summary ,
machi_projection : make_summary ( P_inner2 ) } ] ,
2015-04-06 05:16:20 +00:00
DbgX = replace ( P_newprop3 #projection_v1.dbg , InnerInfo ) ,
? REACT ( { a30 , ? LINE , [ qqqwww | DbgX ] } ) ,
2015-06-02 11:55:18 +00:00
{ P_newprop3 #projection_v1 { dbg = DbgX ,
inner = P_inner2 } , S_i } ;
2015-04-06 05:16:20 +00:00
_ - >
{ P_newprop3 , S3 }
end ,
2015-04-14 07:17:49 +00:00
%% Here's a more common reason for moving from inner projection to
%% a normal projection: the old proj has an inner but the newprop
%% does not.
MoveFromInnerToNorm_p =
case { inner_projection_exists ( P_current ) ,
inner_projection_exists ( P_newprop10 ) } of
{ true , false } - > true ;
{ _ , _ } - > false
end ,
%% If P_current says that we're currently flapping, and if
%% P_newprop10 says that we're no longer flapping, then we really
%% ought to stop flapping, right?
2015-04-13 15:54:38 +00:00
%%
2015-04-14 07:17:49 +00:00
%% Not quite so simple....
2015-04-13 15:54:38 +00:00
%%
%% AAAAH, right. The case I'm dealing with right now is an asymmetric
%% partition in a 4 member chain that affects all_hosed=[a,b,c] but
%% member D is *NOT* noticing anything different in the current scheme:
%% {inner_projection_exists(current), inner_projection_exists(new)}
%% is {true, true}.
%% Yes, that hypothesis is confirmed by time-honored io:format() tracing.
%%
%% So, we need something to kick a silly member like 'd' out of its
2015-04-14 07:17:49 +00:00
%% rut of am-still-flapping. So, let's try this:
%% If we see a P_latest from author != MyName, and if P_latest's
%% author's flap count is now 0 (latest!), but that same member's
2015-04-13 15:54:38 +00:00
%% flap count in P_current is non-zero, then we assume that author
2015-04-14 07:17:49 +00:00
%% has moved out of flapping state and that therefore we ought to do
%% the same.
2015-04-13 15:54:38 +00:00
%% Remember! P_current is this manager's private in-use projection.
2015-04-14 07:17:49 +00:00
%% It is always less than or equal to P_latest's epoch!
2015-04-13 15:54:38 +00:00
Current_flap_counts = get_all_flap_counts ( P_current ) ,
Latest_authors_flap_count_current = proplists : get_value (
Author_latest , Current_flap_counts ) ,
Latest_flap_counts = get_all_flap_counts ( P_latest ) ,
Latest_authors_flap_count_latest = proplists : get_value (
Author_latest , Latest_flap_counts ) ,
Kicker_p = case { Latest_authors_flap_count_current ,
Latest_authors_flap_count_latest } of
{ NotUndef , undefined } when NotUndef / = undefined - >
true ;
{ _ , _ } - >
false
end ,
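%% A worked example of the Kicker_p test above, using hypothetical
%% flap count proplists: if P_current's counts contain an entry for
%% Author_latest, e.g. {b,{{{epk,440},T0},7}}, but P_latest's counts
%% contain no entry for b at all (proplists:get_value/2 returns
%% 'undefined'), then Kicker_p is true: b appears to have stopped
%% flapping, so we ought to consider doing the same.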
2015-04-14 07:17:49 +00:00
if MoveFromInnerToNorm_p orelse Kicker_p - >
2015-04-14 06:30:24 +00:00
ClauseInfo = [ { inner_kicker , Kicker_p } ,
{ move_from_inner , MoveFromInnerToNorm_p } ] ,
? REACT ( { a30 , ? LINE , ClauseInfo } ) ,
%% %% 2015-04-14: YEAH, this appears to work!
%% %% 1. Create a "safe" projection that is upi=[],repairing=[]
%% %% 2. Declare it to be best & latest by pure fiat.
%% %% (The C100 transition will double-check that it's safe.)
%% %% 3. Jump to C100. Then, for the next iteration,
%% %% our P_current state to a smallest-possible-score
%% %% state ... and let the chain reassemble itself from
%% %% length zero.
%% #projection_v1{epoch_number=Epoch_newprop10, all_members=All_list,
%% members_dict=MembersDict} = P_newprop10,
%% P_noneprop0 = make_none_projection(MyName, All_list, MembersDict),
%% P_noneprop1 = P_noneprop0#projection_v1{epoch_number=Epoch_newprop10},
%% %% Just to be clear, we clobber any flapping info by setting dbg.
%% P_noneprop = P_noneprop1#projection_v1{dbg=ClauseInfo},
%% react_to_env_C100(P_noneprop, P_latest, S);
%% 2015-04-14: Let's experiment with using the current inner
%% projection (or, if there really is no inner, just P_current).
%% This is safe because it's already P_current and by assumption,
%% anything that made it through the logical maze to get here
%% is safe. So re-using it with a higher epoch number doesn't
%% make any significant change.
%%
%% Yeah, it appears to work, also, nice! This can help save some
%% repair operations (compared to the other safe thing to do
%% here, which uses make_none_projection() to build & repair the
2015-04-14 07:17:49 +00:00
%% entire chain from scratch). Note that this isn't a guarantee
%% that repair steps will be minimized: for a 4-member cluster
%% that has an asymmetric partition which organizes 3 clusters of
%% inner-upi=[a], inner-upi=[b], and inner-upi=[c,d], there is no
%% guarantee (yet?) that the [c,d] chain will be the UPI basis
%% for repairs when the partition is healed: the quickest author
%% after the healing will make that choice for everyone.
%% TODO: Perhaps that quickest author should consult all of the
%% other private stores, check their inner, and if there is a
%% higher rank there, then goto C200 for a wait-and-see cycle?
2015-04-14 06:30:24 +00:00
P_inner2A = inner_projection_or_self ( P_current ) ,
P_inner2B =
P_inner2A #projection_v1 { epoch_number =
2015-04-13 15:54:38 +00:00
P_newprop10 #projection_v1.epoch_number ,
2015-04-14 06:30:24 +00:00
dbg = ClauseInfo } ,
react_to_env_C100 ( P_inner2B , P_latest , S ) ;
2015-04-13 15:54:38 +00:00
2015-04-14 06:30:24 +00:00
true - >
2015-04-14 09:19:08 +00:00
? REACT ( { a30 , ? LINE , [ ] } ) ,
2015-04-14 06:30:24 +00:00
react_to_env_A40 ( Retries , P_newprop10 , P_latest ,
LatestUnanimousP , S10 )
end .
2015-04-06 05:16:20 +00:00
react_to_env_A40 ( Retries , P_newprop , P_latest , LatestUnanimousP ,
#ch_mgr { name = MyName , proj = P_current } = S ) - >
? REACT ( a40 ) ,
[ { Rank_newprop , _ } ] = rank_projections ( [ P_newprop ] , P_current ) ,
[ { Rank_latest , _ } ] = rank_projections ( [ P_latest ] , P_current ) ,
LatestAuthorDownP = lists : member ( P_latest #projection_v1.author_server ,
P_newprop #projection_v1.down ) ,
if
2015-04-10 12:59:56 +00:00
%% Epoch == 0 is reserved for first-time, just booting conditions.
2015-04-09 08:47:43 +00:00
( P_current #projection_v1.epoch_number > 0
andalso
P_latest #projection_v1.epoch_number > P_current #projection_v1.epoch_number )
2015-04-06 05:16:20 +00:00
orelse
not LatestUnanimousP - >
? REACT ( { a40 , ? LINE ,
[ { latest_epoch , P_latest #projection_v1.epoch_number } ,
{ current_epoch , P_current #projection_v1.epoch_number } ,
{ latest_unanimous_p , LatestUnanimousP } ] } ) ,
%% 1st clause: someone else has written a newer projection
%% 2nd clause: a network partition has healed, revealing a
%% differing opinion.
react_to_env_B10 ( Retries , P_newprop , P_latest , LatestUnanimousP ,
Rank_newprop , Rank_latest , S ) ;
P_latest #projection_v1.epoch_number < P_current #projection_v1.epoch_number
orelse
P_latest / = P_current - >
? REACT ( { a40 , ? LINE ,
[ { latest_epoch , P_latest #projection_v1.epoch_number } ,
{ current_epoch , P_current #projection_v1.epoch_number } ,
{ neq , P_latest / = P_current } ] } ) ,
%% Both of these cases are rare. Elsewhere, the code
%% assumes that the local FLU's projection store is always
%% available, so reads & writes to it aren't going to fail
%% willy-nilly. If that assumption is true, then we can
%% reason as follows:
%%
%% a. If we can always read from the local FLU projection
%% store, then the 1st clause isn't possible because
%% P_latest's epoch # must be at least as large as
%% P_current's epoch #
%%
%% b. If P_latest /= P_current, then there can't be a
%% unanimous reply for P_latest, so the earlier 'if'
%% clause would be triggered and so we could never reach
%% this clause.
%%
%% I'm keeping this 'if' clause just in case the local FLU
%% projection store assumption changes.
react_to_env_B10 ( Retries , P_newprop , P_latest , LatestUnanimousP ,
Rank_newprop , Rank_latest , S ) ;
%% A40a (see flowchart)
Rank_newprop > Rank_latest - >
2015-04-14 07:17:49 +00:00
? REACT ( { a40 , ? LINE ,
2015-04-06 05:16:20 +00:00
[ { rank_latest , Rank_latest } ,
{ rank_newprop , Rank_newprop } ,
{ latest_author , P_latest #projection_v1.author_server } ] } ) ,
%% TODO: There may be an "improvement" here. If we're the
%% highest-ranking FLU in the all_members list, then if we make a
%% projection where our UPI list is the same as P_latest's, and
%% our repairing list is the same as P_latest's, then it may not
%% be necessary to write our projection: it doesn't "improve"
%% anything UPI-wise or repairing-wise. But it isn't clear to me
%% if it's 100% correct to "improve" here and skip writing
%% P_newprop, yet.
react_to_env_C300 ( P_newprop , P_latest , S ) ;
%% A40b (see flowchart)
P_latest #projection_v1.author_server == MyName
andalso
( P_newprop #projection_v1.upi / = P_latest #projection_v1.upi
orelse
P_newprop #projection_v1.repairing / = P_latest #projection_v1.repairing ) - >
? REACT ( { a40 , ? LINE ,
[ { latest_author , P_latest #projection_v1.author_server } ,
{ newprop_upi , P_newprop #projection_v1.upi } ,
{ latest_upi , P_latest #projection_v1.upi } ,
{ newprop_repairing , P_newprop #projection_v1.repairing } ,
{ latest_repairing , P_latest #projection_v1.repairing } ] } ) ,
react_to_env_C300 ( P_newprop , P_latest , S ) ;
%% A40c (see flowchart)
LatestAuthorDownP - >
? REACT ( { a40 , ? LINE ,
[ { latest_author , P_latest #projection_v1.author_server } ,
{ author_is_down_p , LatestAuthorDownP } ] } ) ,
%% TODO: I believe that membership in the
%% P_newprop#projection_v1.down is not sufficient for long
%% chains. Rather, we ought to be using a full broadcast
%% gossip of server up status.
%%
%% Imagine 5 servers in an "Olympic Rings" style
%% overlapping network partition, where ring1 = upper
%% leftmost and ring5 = upper rightmost. It's both
%% possible and desirable for ring5's projection to be
%% seen (public) by ring1. Ring5's projection's rank is
%% definitely higher than ring1's proposed projection's
%% rank ... but we're in a crazy netsplit where:
%% * if we accept ring5's proj: only one functioning chain
%% ([ring4,ring5]) but stable
%% * if we accept ring1's proj: two functioning chains
%% ([ring1,ring2] and [ring4,ring5] independently)
%% but unstable: we're probably going to flap back & forth?!
react_to_env_C300 ( P_newprop , P_latest , S ) ;
true - >
? REACT ( { a40 , ? LINE , [ true ] } ) ,
2015-04-10 12:59:56 +00:00
FinalProps = [ { throttle_seconds , 0 } ] ,
react_to_env_A50 ( P_latest , FinalProps , S )
2015-04-06 05:16:20 +00:00
end .
2015-04-10 12:59:56 +00:00
react_to_env_A50 ( P_latest , FinalProps , S ) - >
2015-04-06 05:16:20 +00:00
? REACT ( a50 ) ,
2015-04-10 12:59:56 +00:00
? REACT ( { a50 , ? LINE , [ { latest_epoch , P_latest #projection_v1.epoch_number } ,
{ final_props , FinalProps } ] } ) ,
{ { no_change , FinalProps , P_latest #projection_v1.epoch_number } , S } .
2015-04-06 05:16:20 +00:00
react_to_env_B10 ( Retries , P_newprop , P_latest , LatestUnanimousP ,
Rank_newprop , Rank_latest ,
2015-04-09 08:47:43 +00:00
#ch_mgr { name = MyName , flap_limit = FlapLimit } = S ) - >
2015-04-06 05:16:20 +00:00
? REACT ( b10 ) ,
{ _ P_newprop_flap_time , P_newprop_flap_count } = get_flap_count ( P_newprop ) ,
2015-04-14 09:19:08 +00:00
UnanimousLatestInnerNotRelevant_p =
case inner_projection_exists ( P_latest ) of
true when P_latest #projection_v1.author_server / = MyName - >
#projection_v1 { down = Down_inner } = inner_projection_or_self (
P_latest ) ,
case lists : member ( MyName , Down_inner ) of
true - >
%% Some foreign author's inner projection thinks that
%% I'm down. Silly! We ought to ignore this one.
? REACT ( { b10 , ? LINE , [ { down_inner , Down_inner } ] } ) ,
true ;
false - >
? REACT ( { b10 , ? LINE , [ { down_inner , Down_inner } ] } ) ,
false
end ;
_ Else_u - >
false
end ,
2015-04-06 05:16:20 +00:00
if
2015-04-14 09:19:08 +00:00
LatestUnanimousP
andalso
UnanimousLatestInnerNotRelevant_p - >
? REACT ( { b10 , ? LINE , [ ] } ) ,
put ( b10_hack , false ) ,
%% Do not go to C100, because we want to ignore this latest
%% proposal. Write ours instead via C300.
react_to_env_C300 ( P_newprop , P_latest , S ) ;
2015-04-06 05:16:20 +00:00
LatestUnanimousP - >
2015-04-14 07:17:49 +00:00
? REACT ( { b10 , ? LINE ,
[ { latest_unanimous_p , LatestUnanimousP } ,
{ latest_epoch , P_latest #projection_v1.epoch_number } ,
{ latest_author , P_latest #projection_v1.author_server } ,
{ newprop_epoch , P_newprop #projection_v1.epoch_number } ,
{ newprop_author , P_newprop #projection_v1.author_server }
] } ) ,
2015-04-06 05:16:20 +00:00
put ( b10_hack , false ) ,
react_to_env_C100 ( P_newprop , P_latest , S ) ;
P_newprop_flap_count > = FlapLimit - >
%% I am flapping ... what else do I do?
? REACT ( { b10 , ? LINE , [ i_am_flapping ,
{ newprop_flap_count , P_newprop_flap_count } ,
{ flap_limit , FlapLimit } ] } ) ,
_ B10Hack = get ( b10_hack ) ,
2015-06-15 03:41:16 +00:00
case proplists : get_value ( private_write_verbose , S #ch_mgr.opts ) of
true - >
io : format ( user , " {FLAP: ~w flaps ~w }! " , [ S #ch_mgr.name , P_newprop_flap_count ] ) ;
_ - >
ok
end ,
2015-04-06 05:16:20 +00:00
if
2015-04-10 12:59:56 +00:00
%% MEANWHILE, we have learned some things about this
%% algorithm in the past few months. With the introduction
%% of the "inner projection" concept, we know that the inner
%% projection may be stable but the "outer" projection will
%% continue to be flappy for as long as there's an
%% asymmetric network partition somewhere. We now know that
%% that flappiness is OK and that the only problem with it
%% is that it needs to be slowed down so that we don't have
%% zillions of public projection proposals written every
%% second.
2015-04-06 05:16:20 +00:00
%%
2015-04-10 12:59:56 +00:00
%% It doesn't matter if the FlapLimit count mechanism
%% doesn't give an accurate sense of global flapping state.
%% FlapLimit is enough to be able to tell us to slow down.
2015-04-14 07:17:49 +00:00
true - >
2015-04-10 12:59:56 +00:00
%% We already know that I'm flapping. We need to
%% signal to the rest of the world that I'm writing
%% and flapping and churning, so we cannot always
%% go to A50 from here.
%%
%% If we do go to A50, then recommend that we poll less
%% frequently.
{ X , S2 } = gimme_random_uniform ( 100 , S ) ,
if X < 80 - >
? REACT ( { b10 , ? LINE , [ flap_stop ] } ) ,
ThrottleTime = if P_newprop_flap_count < 500 - > 1 ;
P_newprop_flap_count < 1000 - > 5 ;
P_newprop_flap_count < 5000 - > 10 ;
true - > 30
2015-04-10 12:59:56 +00:00
end ,
FinalProps = [ { my_flap_limit , FlapLimit } ,
{ throttle_seconds , ThrottleTime } ] ,
react_to_env_A50 ( P_latest , FinalProps , S2 ) ;
true - >
%% It is our moral imperative to write so that
%% the flap cycle continues enough times that
%% everyone notices and then eventually falls into
%% consensus.
? REACT ( { b10 , ? LINE , [ flap_continue ] } ) ,
react_to_env_C300 ( P_newprop , P_latest , S2 )
end
2015-04-06 05:16:20 +00:00
end ;
Retries > 2 - >
? REACT ( { b10 , ? LINE , [ { retries , Retries } ] } ) ,
put ( b10_hack , false ) ,
%% The author of P_latest is too slow or crashed.
%% Let's try to write P_newprop and see what happens!
react_to_env_C300 ( P_newprop , P_latest , S ) ;
Rank_latest > = Rank_newprop
andalso
P_latest #projection_v1.author_server / = MyName - >
? REACT ( { b10 , ? LINE ,
[ { rank_latest , Rank_latest } ,
{ rank_newprop , Rank_newprop } ,
{ latest_author , P_latest #projection_v1.author_server } ] } ) ,
put ( b10_hack , false ) ,
2015-04-14 09:19:08 +00:00
%% TODO: Is a UnanimousLatestInnerNotRelevant_p test needed in this clause???
%% Give the author of P_latest an opportunity to write a
2015-04-06 05:16:20 +00:00
%% new projection in a new epoch to resolve this mixed
%% opinion.
react_to_env_C200 ( Retries , P_latest , S ) ;
true - >
? REACT ( { b10 , ? LINE } ) ,
2015-04-13 15:54:38 +00:00
? REACT ( { b10 , ? LINE , [ { retries , Retries } , { rank_latest , Rank_latest } , { rank_newprop , Rank_newprop } , { latest_author , P_latest #projection_v1.author_server } ] } ) , % TODO debug delete me!
2015-04-06 05:16:20 +00:00
put ( b10_hack , false ) ,
%% P_newprop is best, so let's write it.
react_to_env_C300 ( P_newprop , P_latest , S )
end .
react_to_env_C100 ( P_newprop , P_latest ,
#ch_mgr { name = MyName , proj = P_current } = S ) - >
? REACT ( c100 ) ,
2015-04-10 13:41:22 +00:00
2015-04-06 05:16:20 +00:00
I_am_UPI_in_newprop_p = lists : member ( MyName , P_newprop #projection_v1.upi ) ,
I_am_Repairing_in_latest_p = lists : member ( MyName ,
2015-04-10 13:41:22 +00:00
P_latest #projection_v1.repairing ) ,
Current_sane_p = projection_transition_is_sane ( P_current , P_latest ,
MyName ) ,
2015-06-04 16:06:39 +00:00
put ( xxx_hack , [ { p_current , machi_projection : make_summary ( P_current ) } ,
{ epoch_compare , P_latest #projection_v1.epoch_number > P_current #projection_v1.epoch_number } ,
{ i_am_upi_in_newprop_p , I_am_UPI_in_newprop_p } ,
2015-06-15 08:22:02 +00:00
{ i_am_repairing_in_latest_p , I_am_Repairing_in_latest_p } ] ) ,
case Current_sane_p of
2015-04-10 13:41:22 +00:00
_ when P_current #projection_v1.epoch_number == 0 - >
%% Epoch == 0 is reserved for first-time, just booting conditions.
2015-04-09 08:47:43 +00:00
? REACT ( { c100 , ? LINE , [ first_write ] } ) ,
react_to_env_C110 ( P_latest , S ) ;
2015-06-15 08:22:02 +00:00
true - >
2015-04-06 05:16:20 +00:00
? REACT ( { c100 , ? LINE , [ sane ] } ) ,
react_to_env_C110 ( P_latest , S ) ;
2015-06-15 08:22:02 +00:00
_ AnyOtherReturnValue - >
2015-04-14 07:17:49 +00:00
%% P_latest is not sane.
2015-04-06 05:16:20 +00:00
%% By process of elimination, P_newprop is best,
%% so let's write it.
? REACT ( { c100 , ? LINE , [ not_sane ] } ) ,
react_to_env_C300 ( P_newprop , P_latest , S )
end .
2015-04-09 08:13:38 +00:00
react_to_env_C110 ( P_latest , #ch_mgr { name = MyName } = S ) - >
2015-04-06 05:16:20 +00:00
? REACT ( c110 ) ,
2015-06-04 16:06:39 +00:00
%% Extra_todo = [],
Extra_todo = get ( xxx_hack ) ,
2015-06-04 08:39:29 +00:00
%% Extra_todo = [{hee, lists:reverse(get(react))}],
2015-04-14 07:17:49 +00:00
P_latest2 = machi_projection : update_dbg2 ( P_latest , Extra_todo ) ,
2015-04-06 05:16:20 +00:00
2015-04-09 08:13:38 +00:00
MyNamePid = proxy_pid ( MyName , S ) ,
2015-05-01 15:33:49 +00:00
%% This is the local projection store. Use a larger timeout, so
%% that if we are killed by a timeout exception, then things
%% locally must already be pretty horrible.
2015-05-06 02:41:04 +00:00
%% ok = ?FLU_PC:write_projection(MyNamePid, private, P_latest2, ?TO*30),
Goo = P_latest2 #projection_v1.epoch_number ,
2015-05-07 08:52:16 +00:00
%% io:format(user, "HEE110 ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse(get(react))]),
2015-05-06 02:41:04 +00:00
{ ok , Goo } = { ? FLU_PC : write_projection ( MyNamePid , private , P_latest2 , ? TO * 30 ) , Goo } ,
2015-04-06 05:16:20 +00:00
case proplists : get_value ( private_write_verbose , S #ch_mgr.opts ) of
true - >
{ _ , _ , C } = os : timestamp ( ) ,
MSec = trunc ( C / 1000 ) ,
{ HH , MM , SS } = time ( ) ,
2015-04-10 12:59:56 +00:00
case inner_projection_exists ( P_latest2 ) of
false - >
2015-06-15 03:41:16 +00:00
case proplists : get_value ( private_write_verbose , S #ch_mgr.opts ) of
true - >
io : format ( user , " \n ~2..0w : ~2..0w : ~2..0w . ~3..0w ~p uses plain: ~w \n " ,
2015-04-14 07:32:47 +00:00
[ HH , MM , SS , MSec , S #ch_mgr.name ,
machi_projection : make_summary ( P_latest2 ) ] ) ;
2015-06-15 03:41:16 +00:00
_ - >
ok
end ;
2015-04-10 12:59:56 +00:00
true - >
2015-06-15 03:41:16 +00:00
case proplists : get_value ( private_write_verbose , S #ch_mgr.opts ) of
true - >
P_inner = inner_projection_or_self ( P_latest2 ) ,
io : format ( user , " \n ~2..0w : ~2..0w : ~2..0w . ~3..0w ~p uses inner: ~w \n " ,
2015-04-10 05:15:16 +00:00
[ HH , MM , SS , MSec , S #ch_mgr.name ,
2015-06-15 03:41:16 +00:00
machi_projection : make_summary ( P_inner ) ] ) ;
_ - >
ok
end
2015-04-14 07:32:47 +00:00
end ;
2015-04-06 05:16:20 +00:00
_ - >
ok
end ,
2015-04-10 12:59:56 +00:00
react_to_env_C120 ( P_latest , [ ] , S ) .
2015-04-06 05:16:20 +00:00
2015-04-10 12:59:56 +00:00
react_to_env_C120 ( P_latest , FinalProps , #ch_mgr { proj_history = H } = S ) - >
2015-04-06 05:16:20 +00:00
? REACT ( c120 ) ,
H2 = queue : in ( P_latest , H ) ,
H3 = case queue : len ( H2 ) of
%% TODO: revisit this constant? Is this too long as a base?
%% My hunch is that it's fine and that the flap_limit needs to
%% be raised much higher (because it can increase several ticks
%% without a newer public epoch proposed anywhere).
X when X > length ( P_latest #projection_v1.all_members ) * 2 - >
{ _ V , Hxx } = queue : out ( H2 ) ,
Hxx ;
_ - >
H2
end ,
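%% For example, with a 4 member chain the history is bounded at
%% length(all_members) * 2 = 8 entries: the 9th queue:in/2 above
%% pushes the oldest projection out again via queue:out/1.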
2015-04-14 09:19:08 +00:00
%% HH = [if is_atom(X) -> X; is_tuple(X) -> {element(1,X), element(2,X)} end || X <- get(react), is_atom(X) orelse size(X) == 3],
%% io:format(user, "HEE120 ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse(HH)]),
2015-04-06 05:16:20 +00:00
2015-04-09 08:13:38 +00:00
? REACT ( { c120 , [ { latest , machi_projection : make_summary ( P_latest ) } ] } ) ,
2015-04-10 12:59:56 +00:00
{ { now_using , FinalProps , P_latest #projection_v1.epoch_number } ,
2015-04-09 05:44:58 +00:00
S #ch_mgr { proj = P_latest , proj_history = H3 } } .
2015-04-06 05:16:20 +00:00
react_to_env_C200 ( Retries , P_latest , S ) - >
? REACT ( c200 ) ,
try
%% TODO: This code works "well enough" without actually
%% telling anybody anything. Do we want to rip this out?
%% Actually implement it? None of the above?
yo : tell_author_yo ( P_latest #projection_v1.author_server )
catch _ Type : _ Err - >
%% io:format(user, "TODO: tell_author_yo is broken: ~p ~p\n",
%% [_Type, _Err]),
ok
end ,
react_to_env_C210 ( Retries , S ) .
react_to_env_C210 ( Retries , #ch_mgr { name = MyName , proj = Proj } = S ) - >
? REACT ( c210 ) ,
sleep_ranked_order ( 10 , 100 , MyName , Proj #projection_v1.all_members ) ,
react_to_env_C220 ( Retries , S ) .
react_to_env_C220 ( Retries , S ) - >
? REACT ( c220 ) ,
react_to_env_A20 ( Retries + 1 , S ) .
react_to_env_C300 ( #projection_v1 { epoch_number = _ Epoch_newprop } = P_newprop ,
#projection_v1 { epoch_number = _ Epoch_latest } = _ P_latest , S ) - >
? REACT ( c300 ) ,
%% This logic moved to A30.
%% NewEpoch = erlang:max(Epoch_newprop, Epoch_latest) + 1,
%% P_newprop2 = P_newprop#projection_v1{epoch_number=NewEpoch},
2015-04-09 08:13:38 +00:00
%% react_to_env_C310(update_checksum(P_newprop2), S).
2015-04-06 05:16:20 +00:00
2015-04-09 08:13:38 +00:00
react_to_env_C310 ( machi_projection : update_checksum ( P_newprop ) , S ) .
2015-04-06 05:16:20 +00:00
react_to_env_C310 ( P_newprop , S ) - >
? REACT ( c310 ) ,
Epoch = P_newprop #projection_v1.epoch_number ,
{ WriteRes , S2 } = cl_write_public_proj_skip_local_error ( Epoch , P_newprop , S ) ,
? REACT ( { c310 , ? LINE ,
2015-04-09 08:13:38 +00:00
[ { newprop , machi_projection : make_summary ( P_newprop ) } ,
2015-04-06 05:16:20 +00:00
{ write_result , WriteRes } ] } ) ,
2015-06-04 08:39:29 +00:00
%% io:format(user, "HEE310 ~w ~w ~w\n", [S#ch_mgr.name, self(), lists:reverse(get(react))]),
2015-04-06 05:16:20 +00:00
react_to_env_A10 ( S2 ) .
2015-04-13 15:54:38 +00:00
calculate_flaps ( P_newprop , _ P_current , _ FlapLimit ,
2015-04-06 05:16:20 +00:00
#ch_mgr { name = MyName , proj_history = H , flap_start = FlapStart ,
2015-06-02 11:32:52 +00:00
flaps = Flaps , runenv = RunEnv1 } = S ) - >
2015-04-06 05:16:20 +00:00
HistoryPs = queue : to_list ( H ) ,
Ps = HistoryPs ++ [ P_newprop ] ,
UniqueProposalSummaries = lists : usort ( [ { P #projection_v1.upi ,
P #projection_v1.repairing ,
P #projection_v1.down } | | P < - Ps ] ) ,
{ _ WhateverUnanimous , BestP , Props , _ S } =
cl_read_latest_projection ( private , S ) ,
2015-04-30 14:16:08 +00:00
NotBestPs = proplists : get_value ( not_unanimous_answers , Props , [ ] ) ,
2015-04-06 05:16:20 +00:00
DownUnion = lists : usort (
lists : flatten (
[ P #projection_v1.down | |
P < - [ BestP | NotBestPs ] ] ) ) ,
HosedTransUnion = proplists : get_value ( trans_all_hosed , Props ) ,
TransFlapCounts0 = proplists : get_value ( trans_all_flap_counts , Props ) ,
%% NOTE: bad_answer_flus are probably due to timeout or some other network
%% glitch, i.e., anything other than {ok, P::projection()}
%% response from machi_flu0:proj_read_latest().
BadFLUs = proplists : get_value ( bad_answer_flus , Props ) ,
RemoteTransFlapCounts1 = lists : keydelete ( MyName , 1 , TransFlapCounts0 ) ,
RemoteTransFlapCounts =
[ X | | { _ FLU , { { _ FlEpk , FlTime } , _ FlapCount } } = X < - RemoteTransFlapCounts1 ,
FlTime / = ? NOT_FLAPPING ] ,
TempNewFlaps = Flaps + 1 ,
TempAllFlapCounts = lists : sort ( [ { MyName , { FlapStart , TempNewFlaps } } |
RemoteTransFlapCounts ] ) ,
%% Sanity check.
true = lists : all ( fun ( { _ , { _ , _ } } ) - > true ;
( _ ) - > false end , TempAllFlapCounts ) ,
%% H is the bounded history of all of this manager's private
%% projection store writes. If we've proposed the *same*
%% {UPI+Repairing, Down} combination for the entire length of our
%% bounded-size history H, then we're flapping.
%%
%% If we're flapping, then we use our own flap counter and that of
%% all of our peer managers to see if we've all got flap counters
%% that exceed the flap_limit. If that global condition appears
%% true, then we "blow the circuit breaker" by stopping our
%% participation in the flapping store (via the shortcut to A50).
%%
%% We reset our flap counter on any of several conditions:
%%
%% 1. If our bounded history H contains more than one proposal,
%% then by definition we are not flapping.
%% 2. If a remote manager is flapping and has re-started a new
%% flapping episode.
%% 3. If one of the remote managers that we saw earlier has
%% stopped flapping.
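%% As a small illustration of the check below (hypothetical values):
%% if every projection in H plus P_newprop has upi=[a,b],
%% repairing=[c], and down=[d], then UniqueProposalSummaries is the
%% single-element list [{[a,b],[c],[d]}], and once queue:len(H)
%% reaches length(all_members) we conclude that we are flapping.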
? REACT ( { calculate_flaps , queue : len ( H ) , UniqueProposalSummaries } ) ,
case { queue : len ( H ) , UniqueProposalSummaries } of
{ N , [ _ ] } when N > = length ( P_newprop #projection_v1.all_members ) - >
NewFlaps = TempNewFlaps ,
if element ( 2 , FlapStart ) == ? NOT_FLAPPING - >
NewFlapStart = { { epk , P_newprop #projection_v1.epoch_number } , now ( ) } ;
true - >
NewFlapStart = FlapStart
end ,
%% Wow, this behavior is almost spooky.
%%
%% For an example partition map [{c,a}], on the very first
%% time this 'if' clause is hit by FLU b, AllHosed=[a,c].
%% How the heck does B know that??
%%
%% If I use:
%% DownUnionQQQ = [{P#projection_v1.epoch_number, P#projection_v1.author_server, P#projection_v1.down} || P <- [BestP|NotBestPs]],
%% AllHosed = [x_1] ++ DownUnion ++ [x_2] ++ HosedTransUnion ++ [x_3] ++ BadFLUs ++ [{downunionqqq, DownUnionQQQ}];
%%
%% ... then b sees this when proposing epoch 451:
%%
%% {all_hosed,
%% [x_1,a,c,x_2,x_3,
%% {downunionqqq,
%% [{450,a,[c]},{449,b,[]},{448,c,[a]},{441,d,[]}]}]},
%%
%% So b's working on epoch 451 at the same time that d's latest
%% public projection is only epoch 441. But there's enough
2015-05-07 09:43:51 +00:00
%% lag so that b can "see" that a's bad=[c] (due to
%% {error,partition}!) and c's bad=[a]. So voila, b
%% magically knows about both problem FLUs. Weird/cool.
2015-04-06 05:16:20 +00:00
AllFlapCounts = TempAllFlapCounts ,
AllHosed = lists : usort ( DownUnion ++ HosedTransUnion ++ BadFLUs ) ;
{ _ N , _ } - >
NewFlaps = 0 ,
NewFlapStart = { { epk , - 1 } , ? NOT_FLAPPING } ,
AllFlapCounts = [ ] ,
AllHosed = [ ]
end ,
FlappingI = make_flapping_i ( NewFlapStart , NewFlaps , AllHosed ,
AllFlapCounts , BadFLUs ) ,
2015-04-14 07:17:49 +00:00
%% NOTE: Just because we increment flaps here, there's no correlation
%% to successful public proj store writes! For example,
2015-04-06 05:16:20 +00:00
%% if we loop through states C2xx a few times, we would incr
%% flaps each time ... but the C2xx path doesn't write a new
2015-04-14 07:17:49 +00:00
%% proposal to everyone's public proj stores. Similarly,
%% if we go through to C300, we will *try* to write to all public
%% stores, but the C3xx path doesn't care if all of those write
%% attempts *fail*. Our flap count is a rough heuristic only, and
%% a large local flaps count gives no concrete guarantee that any
%% communication has been successful with any other part of the
%% cluster.
2015-06-02 11:32:52 +00:00
%% TODO: 2015-03-04: I'm growing increasingly suspicious of
%% the 'runenv' variable that's threaded through all this code.
%% It isn't doing what I'd originally intended. Fix it.
{ machi_projection : update_checksum ( P_newprop #projection_v1 {
flap = FlappingI } ) ,
S #ch_mgr { flaps = NewFlaps , flap_start = NewFlapStart , runenv = RunEnv1 } } .
2015-04-06 05:16:20 +00:00
make_flapping_i ( ) - >
make_flapping_i ( { { epk , - 1 } , ? NOT_FLAPPING } , 0 , [ ] , [ ] , [ ] ) .
make_flapping_i ( NewFlapStart , NewFlaps , AllHosed , AllFlapCounts , BadFLUs ) - >
2015-06-02 11:32:52 +00:00
#flap_i { flap_count = { NewFlapStart , NewFlaps } ,
all_hosed = AllHosed ,
all_flap_counts = lists : sort ( AllFlapCounts ) ,
bad = BadFLUs } .
2015-04-06 05:16:20 +00:00
projection_transitions_are_sane ( Ps , RelativeToServer ) - >
projection_transitions_are_sane ( Ps , RelativeToServer , false ) .
- ifdef ( TEST ) .
projection_transitions_are_sane_retrospective ( Ps , RelativeToServer ) - >
projection_transitions_are_sane ( Ps , RelativeToServer , true ) .
- endif . % TEST
projection_transitions_are_sane ( [ ] , _ RelativeToServer , _ RetrospectiveP ) - >
true ;
projection_transitions_are_sane ( [ _ ] , _ RelativeToServer , _ RetrospectiveP ) - >
true ;
projection_transitions_are_sane ( [ P1 , P2 | T ] , RelativeToServer , RetrospectiveP ) - >
case projection_transition_is_sane ( P1 , P2 , RelativeToServer ,
RetrospectiveP ) of
true - >
projection_transitions_are_sane ( [ P2 | T ] , RelativeToServer ,
RetrospectiveP ) ;
Else - >
Else
end .
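%% Usage sketch with hypothetical projections P1, P2, P3:
%% projection_transitions_are_sane([P1,P2,P3], a) checks the P1 -> P2
%% transition and then the P2 -> P3 transition from server a's point
%% of view, returning 'true' if every hop is sane or else the first
%% non-true result.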
projection_transition_is_sane ( P1 , P2 , RelativeToServer ) - >
projection_transition_is_sane ( P1 , P2 , RelativeToServer , false ) .
- ifdef ( TEST ) .
projection_transition_is_sane_retrospective ( P1 , P2 , RelativeToServer ) - >
projection_transition_is_sane ( P1 , P2 , RelativeToServer , true ) .
- endif . % TEST
projection_transition_is_sane (
#projection_v1 { epoch_number = Epoch1 ,
epoch_csum = CSum1 ,
creation_time = CreationTime1 ,
author_server = AuthorServer1 ,
all_members = All_list1 ,
down = Down_list1 ,
upi = UPI_list1 ,
repairing = Repairing_list1 ,
dbg = Dbg1 } = P1 ,
#projection_v1 { epoch_number = Epoch2 ,
epoch_csum = CSum2 ,
creation_time = CreationTime2 ,
author_server = AuthorServer2 ,
all_members = All_list2 ,
down = Down_list2 ,
upi = UPI_list2 ,
repairing = Repairing_list2 ,
dbg = Dbg2 } = P2 ,
RelativeToServer , RetrospectiveP ) - >
try
put ( why2 , [ ] ) ,
%% General notes:
%%
%% I'm making no attempt to be "efficient" here. All of these data
%% structures are small, and the funcs aren't called zillions of times per
%% second.
true = is_integer ( Epoch1 ) andalso is_integer ( Epoch2 ) ,
true = is_binary ( CSum1 ) andalso is_binary ( CSum2 ) ,
{ _ , _ , _ } = CreationTime1 ,
{ _ , _ , _ } = CreationTime2 ,
true = is_atom ( AuthorServer1 ) andalso is_atom ( AuthorServer2 ) , % todo type may change?
true = is_list ( All_list1 ) andalso is_list ( All_list2 ) ,
true = is_list ( Down_list1 ) andalso is_list ( Down_list2 ) ,
true = is_list ( UPI_list1 ) andalso is_list ( UPI_list2 ) ,
true = is_list ( Repairing_list1 ) andalso is_list ( Repairing_list2 ) ,
true = is_list ( Dbg1 ) andalso is_list ( Dbg2 ) ,
true = Epoch2 > Epoch1 ,
All_list1 = All_list2 , % todo will probably change
%% No duplicates
true = lists : sort ( Down_list2 ) == lists : usort ( Down_list2 ) ,
true = lists : sort ( UPI_list2 ) == lists : usort ( UPI_list2 ) ,
true = lists : sort ( Repairing_list2 ) == lists : usort ( Repairing_list2 ) ,
%% Disjoint-ness
true = lists : sort ( All_list2 ) == lists : sort ( Down_list2 ++ UPI_list2 ++
Repairing_list2 ) ,
[ ] = [ X | | X < - Down_list2 , not lists : member ( X , All_list2 ) ] ,
[ ] = [ X | | X < - UPI_list2 , not lists : member ( X , All_list2 ) ] ,
[ ] = [ X | | X < - Repairing_list2 , not lists : member ( X , All_list2 ) ] ,
DownS2 = sets : from_list ( Down_list2 ) ,
UPIS2 = sets : from_list ( UPI_list2 ) ,
RepairingS2 = sets : from_list ( Repairing_list2 ) ,
true = sets : is_disjoint ( DownS2 , UPIS2 ) ,
true = sets : is_disjoint ( DownS2 , RepairingS2 ) ,
true = sets : is_disjoint ( UPIS2 , RepairingS2 ) ,
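%% For example (hypothetical member lists), all of the checks above
%% hold for All_list2=[a,b,c,d], UPI_list2=[a,b], Repairing_list2=[c],
%% Down_list2=[d]: no duplicates, the three lists partition the
%% membership, and the three sets are pairwise disjoint.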
? RETURN2 (
chain_state_transition_is_sane ( AuthorServer1 , UPI_list1 , Repairing_list1 ,
AuthorServer2 , UPI_list2 ) )
catch
_ Type : _ Err - >
? RETURN2 ( oops ) ,
S1 = machi_projection : make_summary ( P1 ) ,
S2 = machi_projection : make_summary ( P2 ) ,
Trace = erlang : get_stacktrace ( ) ,
{ err , _ Type , _ Err , from , S1 , to , S2 , relative_to , RelativeToServer ,
history , ( catch lists : sort ( [ no_history ] ) ) ,
stack , Trace }
end .
2015-04-06 05:16:20 +00:00
sleep_ranked_order ( MinSleep , MaxSleep , FLU , FLU_list ) - >
2015-04-09 08:13:38 +00:00
USec = calc_sleep_ranked_order ( MinSleep , MaxSleep , FLU , FLU_list ) ,
timer : sleep ( USec ) ,
USec .
calc_sleep_ranked_order ( MinSleep , MaxSleep , FLU , FLU_list ) - >
Front = lists : takewhile ( fun ( X ) - > X / = FLU end ,
lists : reverse ( lists : sort ( FLU_list ) ) ) ,
2015-05-06 02:41:04 +00:00
Index = length ( Front ) ,
2015-04-06 05:16:20 +00:00
NumNodes = length ( FLU_list ) ,
2015-04-30 08:28:43 +00:00
SleepChunk = if NumNodes == 0 - > 0 ;
2015-05-06 02:41:04 +00:00
true - > ( MaxSleep - MinSleep ) div NumNodes
2015-04-30 08:28:43 +00:00
end ,
2015-05-06 02:41:04 +00:00
MinSleep + ( SleepChunk * Index ) .
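%% A worked example with hypothetical arguments: for
%% calc_sleep_ranked_order(10, 100, FLU, [a,b,c]) the reverse-sorted
%% list is [c,b,a], so SleepChunk = (100 - 10) div 3 = 30 and the
%% result is 10 for c, 40 for b, and 70 for a, giving each FLU its own
%% deterministic slot within the MinSleep..MaxSleep range.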
2015-04-06 05:16:20 +00:00
2015-06-02 11:32:52 +00:00
get_raw_flapping_i ( #projection_v1 { flap = F } ) - >
F .
2015-04-06 05:16:20 +00:00
get_flap_count ( P ) - >
2015-06-02 11:32:52 +00:00
case get_raw_flapping_i ( P ) of undefined - > { 0 , 0 } ;
F - > F #flap_i.flap_count
end .
2015-04-06 05:16:20 +00:00
get_all_flap_counts ( P ) - >
2015-06-02 11:32:52 +00:00
case get_raw_flapping_i ( P ) of undefined - > [ ] ;
F - > F #flap_i.all_flap_counts
end .
2015-04-06 05:16:20 +00:00
get_all_hosed ( P ) when is_record ( P , projection_v1 ) - >
2015-06-02 11:32:52 +00:00
case get_raw_flapping_i ( P ) of undefined - > [ ] ;
F - > F #flap_i.all_hosed
end .
2015-04-06 05:16:20 +00:00
merge_flap_counts ( FlapCounts ) - >
merge_flap_counts ( FlapCounts , orddict : new ( ) ) .
merge_flap_counts ( [ ] , D ) - >
orddict : to_list ( D ) ;
merge_flap_counts ( [ FlapCount | Rest ] , D1 ) - >
%% We know that FlapCount is list({Actor, {{_epk,FlapStartTime},NumFlaps}}).
D2 = orddict : from_list ( FlapCount ) ,
2015-04-06 05:16:20 +00:00
%% If the FlapStartTimes are identical, then pick the bigger flap count.
%% If the FlapStartTimes differ, then pick the larger start time tuple.
D3 = orddict : merge ( fun ( _ Key , { { _ , T1 } , NF1 } = V1 , { { _ , T2 } , NF2 } = V2 )
when T1 == T2 - >
if NF1 > NF2 - >
V1 ;
true - >
V2
end ;
( _ Key , { { _ , T1 } , _ NF1 } = V1 , { { _ , T2 } , _ NF2 } = V2 ) - >
if T1 > T2 - >
V1 ;
true - >
V2
end ;
( _ Key , V1 , V2 ) - >
exit ( { bad_merge_2tuples , mod , ? MODULE , line , ? LINE ,
_ Key , V1 , V2 } )
end , D1 , D2 ) ,
merge_flap_counts ( Rest , D3 ) .
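%% A small example of the merge rules above, using hypothetical
%% counts: merging [{a,{{{epk,5},T1},3}}] with [{a,{{{epk,5},T1},9}}]
%% keeps {{{epk,5},T1},9} (same start time, bigger count), while
%% merging [{a,{{{epk,5},T1},3}}] with [{a,{{{epk,8},T2},1}}] when
%% T2 > T1 keeps {{{epk,8},T2},1}: the newer flapping episode wins.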
2015-04-09 05:44:58 +00:00
proxy_pid ( Name , #ch_mgr { proxies_dict = ProxiesDict } ) - >
orddict : fetch ( Name , ProxiesDict ) .
2015-04-10 12:59:56 +00:00
gimme_random_uniform ( N , S ) - >
RunEnv1 = S #ch_mgr.runenv ,
Seed1 = proplists : get_value ( seed , RunEnv1 ) ,
{ X , Seed2 } = random : uniform_s ( N , Seed1 ) ,
RunEnv2 = [ { seed , Seed2 } | lists : keydelete ( seed , 1 , RunEnv1 ) ] ,
{ X , S #ch_mgr { runenv = RunEnv2 } } .
2015-06-02 11:55:18 +00:00
inner_projection_exists ( #projection_v1 { inner = undefined } ) - >
false ;
inner_projection_exists ( #projection_v1 { inner = _ } ) - >
true .
2015-04-10 12:59:56 +00:00
inner_projection_or_self ( P ) - >
2015-06-02 11:55:18 +00:00
case inner_projection_exists ( P ) of
false - >
2015-04-10 12:59:56 +00:00
P ;
2015-06-02 11:55:18 +00:00
true - >
P #projection_v1.inner
2015-04-10 12:59:56 +00:00
end .
2015-05-02 07:59:28 +00:00
make_chmgr_regname ( A ) when is_atom ( A ) - >
2015-04-30 08:28:43 +00:00
list_to_atom ( atom_to_list ( A ) ++ " _chmgr " ) ;
2015-05-02 07:59:28 +00:00
make_chmgr_regname ( B ) when is_binary ( B ) - >
2015-04-30 08:28:43 +00:00
list_to_atom ( binary_to_list ( B ) ++ " _chmgr " ) .
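%% For example, make_chmgr_regname(a) yields the atom 'a_chmgr' and
%% make_chmgr_regname(<<"b">>) yields 'b_chmgr'.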
2015-04-06 05:16:20 +00:00
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2015-05-11 10:50:13 +00:00
perhaps_start_repair (
#ch_mgr { name = MyName ,
repair_worker = undefined ,
proj = #projection_v1 { creation_time = Start ,
upi = [ _ | _ ] = UPI ,
repairing = [ _ | _ ] } } = S ) - >
2015-05-12 12:45:40 +00:00
RepairId = { MyName , os : timestamp ( ) } ,
2015-05-12 13:42:03 +00:00
RepairOpts = [ { repair_mode , repair } , verbose , { repair_id , RepairId } ] ,
%% RepairOpts = [{repair_mode, check}, verbose],
2015-05-12 12:45:40 +00:00
RepairFun = fun ( ) - > do_repair ( S , RepairOpts , ap_mode ) end ,
2015-05-11 10:50:13 +00:00
LastUPI = lists : last ( UPI ) ,
2015-05-19 10:32:48 +00:00
IgnoreStabilityTime_p = proplists : get_value ( ignore_stability_time ,
S #ch_mgr.opts , false ) ,
2015-05-11 10:50:13 +00:00
case timer : now_diff ( os : timestamp ( ) , Start ) div 1000000 of
2015-05-19 10:32:48 +00:00
N when MyName == LastUPI andalso
( IgnoreStabilityTime_p orelse
N > = ? REPAIR_START_STABILITY_TIME ) - >
2015-05-12 03:56:41 +00:00
{ WorkerPid , _ Ref } = spawn_monitor ( RepairFun ) ,
2015-05-11 10:50:13 +00:00
S #ch_mgr { repair_worker = WorkerPid ,
repair_start = os : timestamp ( ) ,
repair_final_status = undefined } ;
_ - >
S
end ;
perhaps_start_repair ( S ) - >
S .
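%% Illustrative example of the guard above: with UPI=[a,b,c],
%% Repairing=[d], and MyName=c, only c (the tail of the UPI) may spawn
%% the repair worker, and only after the current projection has been
%% stable for at least ?REPAIR_START_STABILITY_TIME seconds (unless
%% the ignore_stability_time option is set). Every other manager, and
%% any manager that already has a repair_worker, falls through to the
%% second clause and leaves its state unchanged.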
2015-05-12 14:37:20 +00:00
do_repair (
#ch_mgr { name = MyName ,
proj = #projection_v1 { upi = UPI ,
2015-05-13 08:58:54 +00:00
repairing = [ _ | _ ] = Repairing ,
2015-05-12 14:37:20 +00:00
members_dict = MembersDict } } = _ S_copy ,
2015-05-16 08:39:58 +00:00
Opts , ap_mode = RepairMode ) - >
2015-05-12 14:37:20 +00:00
T1 = os : timestamp ( ) ,
RepairId = proplists : get_value ( repair_id , Opts , id1 ) ,
2015-05-16 08:39:58 +00:00
error_logger : info_msg ( " Repair start: tail ~p of ~p -> ~p , ~p ID ~w \n " ,
[ MyName , UPI , Repairing , RepairMode , RepairId ] ) ,
2015-05-12 14:37:20 +00:00
ETS = ets : new ( repair_stats , [ private , set ] ) ,
ETS_T_Keys = [ t_in_files , t_in_chunks , t_in_bytes ,
t_out_files , t_out_chunks , t_out_bytes ,
t_bad_chunks , t_elapsed_seconds ] ,
[ ets : insert ( ETS , { K , 0 } ) | | K < - ETS_T_Keys ] ,
2015-05-15 08:15:02 +00:00
Res = machi_chain_repair : repair ( ap_mode , MyName , Repairing , UPI ,
MembersDict , ETS , Opts ) ,
2015-05-12 14:37:20 +00:00
T2 = os : timestamp ( ) ,
Elapsed = ( timer : now_diff ( T2 , T1 ) div 1000 ) / 1000 ,
ets : insert ( ETS , { t_elapsed_seconds , Elapsed } ) ,
Summary = case Res of ok - > " success " ;
_ - > " FAILURE "
end ,
Stats = [ { K , ets : lookup_element ( ETS , K , 2 ) } | | K < - ETS_T_Keys ] ,
2015-05-16 08:39:58 +00:00
error_logger : info_msg ( " Repair ~s : tail ~p of ~p finished ~p repair ID ~w : "
2015-07-01 06:37:53 +00:00
" ~p \n Stats ~p \n " ,
2015-05-16 08:39:58 +00:00
[ Summary , MyName , UPI , RepairMode , RepairId ,
Res , Stats ] ) ,
2015-05-12 14:37:20 +00:00
ets : delete ( ETS ) ,
2015-05-16 08:39:58 +00:00
exit ( { repair_final_status , Res } ) .
2015-05-12 14:37:20 +00:00
2015-05-13 08:58:54 +00:00
sanitize_repair_state ( #ch_mgr { repair_final_status = Res ,
proj = #projection_v1 { upi = [ _ | _ ] } } = S )
2015-05-11 10:50:13 +00:00
when Res / = undefined - >
S #ch_mgr { repair_worker = undefined , repair_start = undefined ,
repair_final_status = undefined } ;
sanitize_repair_state ( S ) - >
S .
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2015-04-06 05:16:20 +00:00
perhaps_call_t ( S , Partitions , FLU , DoIt ) - >
try
perhaps_call ( S , Partitions , FLU , DoIt )
catch
exit : timeout - >
2015-05-01 15:33:49 +00:00
{ error , partition } ;
exit : { timeout , _ } - >
{ error , partition }
2015-04-06 05:16:20 +00:00
end .
2015-04-09 08:13:38 +00:00
perhaps_call ( #ch_mgr { name = MyName } = S , Partitions , FLU , DoIt ) - >
2015-04-09 05:44:58 +00:00
ProxyPid = proxy_pid ( FLU , S ) ,
2015-04-09 08:13:38 +00:00
RemoteFLU_p = FLU / = MyName ,
2015-05-01 15:33:49 +00:00
erase ( bad_sock ) ,
2015-04-06 05:16:20 +00:00
case RemoteFLU_p andalso lists : member ( { MyName , FLU } , Partitions ) of
false - >
2015-04-09 05:44:58 +00:00
Res = DoIt ( ProxyPid ) ,
2015-05-01 15:33:49 +00:00
if Res == { error , partition } - >
remember_partition_hack ( FLU ) ;
true - >
ok
end ,
2015-04-06 05:16:20 +00:00
case RemoteFLU_p andalso lists : member ( { FLU , MyName } , Partitions ) of
false - >
Res ;
_ - >
2015-04-09 08:13:38 +00:00
( catch put ( react , [ { timeout2 , me , MyName , to , FLU , RemoteFLU_p , Partitions } | get ( react ) ] ) ) ,
2015-04-06 05:16:20 +00:00
exit ( timeout )
end ;
_ - >
2015-04-09 08:13:38 +00:00
( catch put ( react , [ { timeout1 , me , MyName , to , FLU , RemoteFLU_p , Partitions } | get ( react ) ] ) ) ,
2015-04-06 05:16:20 +00:00
exit ( timeout )
end .
2015-05-01 15:33:49 +00:00
init_remember_partition_hack ( ) - >
put ( remember_partition_hack , [ ] ) .
remember_partition_hack ( FLU ) - >
put ( remember_partition_hack , [ FLU | get ( remember_partition_hack ) ] ) .
2015-04-06 05:16:20 +00:00
2015-05-01 15:33:49 +00:00
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2015-07-03 10:21:41 +00:00
%% @doc A simple technique for checking chain state transition safety.
%%
%% Math tells us that any change state `UPI1' plus `Repair1' to state
%% `UPI2' is OK as long as `UPI2' is a concatenation of some
%% order-preserving combination from `UPI1' with some order-preserving
%% combination from `Repair1'.
%%
%% ```
%% Good_UPI2s = [ X ++ Y || X <- machi_util:ordered_combinations(UPI1),
%% Y <- machi_util:ordered_combinations(Repair1)]'''
%%
%% Rather than creating that list and then checking if `UPI2' is in
%% it, we try a `diff'-like technique to check for basic state
%% transition safety. See docs for {@link mk/3} for more detail.
%%
%% ```
%% 2> machi_chain_manager1:mk([a,b], [], [a]).
%% {[keep,del],[]} %% good transition
%% 3> machi_chain_manager1:mk([a,b], [], [b,a]).
%% {[del,keep],[]} %% bad transition: too few 'keep' for UPI2's length 2
%% 4> machi_chain_manager1:mk([a,b], [c,d,e], [a,d]).
%% {[keep,del],[2]} %% good transition
%% 5> machi_chain_manager1:mk([a,b], [c,d,e], [a,bogus]).
%% {[keep,del],[error]} %% bad transition: 'bogus' not in Repair1'''
simple_chain_state_transition_is_sane ( UPI1 , Repair1 , UPI2 ) - >
2015-07-03 13:05:35 +00:00
simple_chain_state_transition_is_sane ( undefined , UPI1 , Repair1 ,
undefined , UPI2 ) .
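%% A brute-force sketch of the same check, written directly from the
%% Good_UPI2s definition in the @doc above. For illustration only and
%% not used by this module; ordered_combos/1 is a local stand-in for
%% machi_util:ordered_combinations/1 (all order-preserving
%% sub-sequences of a list):
%%
%% ordered_combos([]) ->
%%     [[]];
%% ordered_combos([H|T]) ->
%%     Rest = ordered_combos(T),
%%     Rest ++ [[H|C] || C <- Rest].
%%
%% brute_force_transition_is_sane(UPI1, Repair1, UPI2) ->
%%     Good_UPI2s = [X ++ Y || X <- ordered_combos(UPI1),
%%                             Y <- ordered_combos(Repair1)],
%%     lists:member(UPI2, Good_UPI2s).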
simple_chain_state_transition_is_sane ( Author1 , UPI1 , Repair1 , Author2 , UPI2 ) - >
2015-07-03 14:17:34 +00:00
put ( why2 , [ ] ) ,
2015-07-03 10:21:41 +00:00
{ KeepsDels , Orders } = mk ( UPI1 , Repair1 , UPI2 ) ,
NumKeeps = length ( [ x | | keep < - KeepsDels ] ) ,
NumOrders = length ( Orders ) ,
2015-07-03 13:05:35 +00:00
Answer1 = false == lists : member ( error , Orders )
andalso Orders == lists : sort ( Orders )
andalso length ( UPI2 ) == NumKeeps + NumOrders ,
if not Answer1 - >
2015-07-03 14:17:34 +00:00
? RETURN2 ( Answer1 ) ;
2015-07-03 13:05:35 +00:00
true - >
2015-07-03 15:01:54 +00:00
            if Orders == [] ->
                    %% No repairing servers have joined UPI2; keep the
                    %% original answer.
                    ?RETURN2(Answer1);
               Author2 == undefined ->
                    %% At least one Repairing1 element is now in UPI2.
                    %% We need Author2 to make a better decision. Go
                    %% with what we know, and blame the silly caller
                    %% for not giving us what we need.
                    ?RETURN2(Answer1);
               Author2 /= undefined ->
                    %% At least one Repairing1 element is now in UPI2.
                    %% We permit only the tail to author such a UPI2.
                    case catch(lists:last(UPI1)) of
                        UPI1_tail when UPI1_tail == Author2 ->
                            ?RETURN2(true);
                        _ ->
                            ?RETURN2(false)
                    end
            end
    end.
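
%% For illustration of the tail-author rule above. These example calls
%% are hypothetical (they assume in-module evaluation and that
%% ?RETURN2(X) merely records the decision point before evaluating to X):
%%
%%   %% 'b' was repairing and joins UPI2; the author 'c' is the tail of
%%   %% UPI1, so the transition is judged sane (true):
%%   simple_chain_state_transition_is_sane(a, [a,c], [b], c, [a,c,b])
%%
%%   %% The same transition authored by the head 'a' is rejected (false):
%%   simple_chain_state_transition_is_sane(c, [a,c], [b], a, [a,c,b])
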
chain_state_transition_is_sane(Author1, UPI1, Repair1, Author2, UPI2) ->
    ToSelfOnly_p = if UPI2 == [Author2] -> true;
                      true              -> false
                   end,
    Disjoint_UPIs = ordsets:is_disjoint(ordsets:from_list(UPI1),
                                        ordsets:from_list(UPI2)),
    Author2_is_Repairer_p = case (catch lists:last(UPI1)) of
                                FLU when FLU == Author2 ->
                                    true;
                                _ ->
                                    false
                            end,
    %% This if statement contains the only exceptions that we make to
    %% the judgement of simple_chain_state_transition_is_sane().
    if ToSelfOnly_p ->
            %% The transition is to UPI2=[Author2].
            %% For AP mode, this transition is always safe.
            ?RETURN2(true);
       Disjoint_UPIs ->
            %% The transition from UPI1 -> UPI2 where the two are
            %% disjoint, i.e., they have no FLUs in common.
            %% For AP mode, this transition is always safe.
            ?RETURN2(true);
       true ->
            simple_chain_state_transition_is_sane(Author1, UPI1, Repair1,
                                                  Author2, UPI2)
    end.
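
%% For illustration of the two AP-mode exceptions above, under the same
%% assumptions as the sketch after simple_chain_state_transition_is_sane/5
%% (in-module evaluation, ?RETURN2(X) evaluates to X):
%%
%%   %% UPI2 is exactly [Author2]: always accepted (true).
%%   chain_state_transition_is_sane(a, [a,b], [], c, [c])
%%
%%   %% UPI1 and UPI2 share no FLUs: always accepted (true).
%%   chain_state_transition_is_sane(a, [a,b], [], c, [c,d])
%%
%%   %% Otherwise we defer to simple_chain_state_transition_is_sane/5;
%%   %% here 'a' & 'b' have swapped order, so the result is false.
%%   chain_state_transition_is_sane(a, [a,b], [], b, [b,a])
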
%% @doc Create a 2-tuple that describes how `UPI1' + `Repair1' are
%% transformed into `UPI2' in a chain state change.
%%
%% The 1st part of the 2-tuple is a list of `keep' and `del' instructions,
%% relative to the items in UPI1 and whether they are present (`keep') or
%% absent (`del') in `UPI2'.
%%
%% The 2nd part of the 2-tuple is `list(non_neg_integer()|error)' that
%% describes the relative order of items in `Repair1' that appear in
%% `UPI2'. The `error' atom is used to denote items not present in
%% `Repair1'.
mk(UPI1, Repair1, UPI2) ->
    mk(UPI1, Repair1, UPI2, []).

mk([X|UPI1], Repair1, [X|UPI2], Acc) ->
    mk(UPI1, Repair1, UPI2, [keep|Acc]);
mk([X|UPI1], Repair1, UPI2, Acc) ->
    mk(UPI1, Repair1, UPI2 -- [X], [del|Acc]);
mk([], [], [], Acc) ->
    {lists:reverse(Acc), []};
mk([], Repair1, UPI2, Acc) ->
    {lists:reverse(Acc), machi_util:mk_order(UPI2, Repair1)}.
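
%% The order list in mk/4's result comes from machi_util:mk_order/2,
%% which is not defined in this module. The following is only a sketch
%% of what such a helper could look like, assuming the 1-based-position
%% semantics shown in the examples above; it is not the actual
%% machi_util implementation:
%%
%%   mk_order(UPI2, Repair1) ->
%%       %% Pair each Repair1 server with its 1-based position.
%%       R1_with_index = lists:zip(Repair1, lists:seq(1, length(Repair1))),
%%       %% Map each UPI2 server to its Repair1 position, or 'error'
%%       %% if it never appeared in Repair1.
%%       [case lists:keyfind(FLU, 1, R1_with_index) of
%%            {FLU, Index} -> Index;
%%            false        -> error
%%        end || FLU <- UPI2].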