From 2763b16ca2b15ac16b0eee3e06bc2e5f3c4ec4aa Mon Sep 17 00:00:00 2001
From: Scott Lystig Fritchie
Date: Thu, 25 Jun 2015 16:03:00 +0900
Subject: [PATCH] timing_pb_encoding_test_... speed factor=35.95 [2.730 s] ok

So, the PB style encoding of the Mpb_LL_WriteProjectionReq message is about
35-36 times slower than using Erlang's term_to_binary() and binary_to_term().
{sigh}
---
 src/machi.proto          |  2 +-
 src/machi_pb_wrap.erl    |  4 ++--
 test/machi_flu1_test.erl | 37 +++++++++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/src/machi.proto b/src/machi.proto
index 2afc2c5..20aeefb 100644
--- a/src/machi.proto
+++ b/src/machi.proto
@@ -96,7 +96,7 @@ message Mpb_P_Srvr {
     required string proto_mod = 2;
     required string address = 3;
     required string port = 4;
-    required bytes props = 5;
+    required bytes opaque_props = 5;
 }
 
 //////////////////////////////////////////
diff --git a/src/machi_pb_wrap.erl b/src/machi_pb_wrap.erl
index 9daa54c..ec84de3 100644
--- a/src/machi_pb_wrap.erl
+++ b/src/machi_pb_wrap.erl
@@ -56,13 +56,13 @@ conv_from_p_srvr(#p_srvr{name=Name,
                 proto_mod=to_list(ProtoMod),
                 address=to_list(Address),
                 port=to_list(Port),
-                props=enc_sexp(Props)}.
+                opaque_props=enc_sexp(Props)}.
 
 conv_to_p_srvr(#mpb_p_srvr{name=Name,
                            proto_mod=ProtoMod,
                            address=Address,
                            port=Port,
-                           props=Props}) ->
+                           opaque_props=Props}) ->
     #p_srvr{name=to_atom(Name),
             proto_mod=to_atom(ProtoMod),
             address=to_list(Address),
diff --git a/test/machi_flu1_test.erl b/test/machi_flu1_test.erl
index fa9d05c..c9c24bb 100644
--- a/test/machi_flu1_test.erl
+++ b/test/machi_flu1_test.erl
@@ -210,5 +210,42 @@ bad_checksum_test() ->
         ok = ?FLU:stop(FLU1)
     end.
 
+%% The purpose of timing_pb_encoding_test_ and timing_bif_encoding_test_ is
+%% to show the relative speed of the PB encoding of something like a
+%% projection store command is about 35x slower than simply using the Erlang
+%% BIFs term_to_binary() and binary_to_term().  We try to do enough work, at
+%% least a couple of seconds, so that any dynamic CPU voltage adjustment
+%% might kick into highest speed, in theory.
+
+timing_pb_encoding_test_() ->
+    {timeout, 60, fun() -> timing_pb_encoding_test2() end}.
+
+timing_pb_encoding_test2() ->
+    P_a = #p_srvr{name=a, address="localhost", port=4321},
+    P1 = machi_projection:new(1, a, [P_a], [], [a], [], []),
+    DoIt1 = fun() ->
+                    Req = machi_pb_wrap:make_projection_req(
+                            <<1,2,3,4>>, {write_projection, public, P1}),
+                    Bin = list_to_binary(machi_pb:encode_mpb_ll_request(Req)),
+                    ZZ = machi_pb:decode_mpb_ll_request(Bin),
+                    _ = machi_pb_wrap:unmake_projection_req(ZZ)
+            end,
+    XX = lists:seq(1,30*1000),
+    erlang:garbage_collect(),
+    RUN1 = timer:tc(fun() -> begin [_ = DoIt1() || _ <- XX], ok end end),
+    erlang:garbage_collect(),
+
+    DoIt2 = fun() ->
+                    Req = term_to_binary({
+                            <<1,2,3,4>>, {write_projection, public, P1}}),
+                    _ = binary_to_term(Req)
+            end,
+    erlang:garbage_collect(),
+    RUN2 = timer:tc(fun() -> begin [_ = DoIt2() || _ <- XX], ok end end),
+    erlang:garbage_collect(),
+    Factor = (element(1, RUN1) / element(1, RUN2)),
+    io:format(user, " speed factor=~.2f ", [Factor]),
+    ok.
+
 -endif. % !PULSE
 -endif. % TEST
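
A side note on reproducing the numbers: the term_to_binary()/binary_to_term()
half of the comparison can be exercised without the Machi records or the
generated machi_pb module. The sketch below reuses the same loop /
erlang:garbage_collect() / timer:tc() pattern as the test above, but the
module name and the tuple payload are stand-ins (the real test round-trips a
projection record via machi_pb_wrap), so only the shape of the measurement
carries over, not the absolute timings.

    %% Minimal, self-contained sketch; bif_roundtrip_timing is a made-up
    %% module name and the payload below is a stand-in for the projection
    %% record used by timing_pb_encoding_test2/0.
    -module(bif_roundtrip_timing).
    -export([run/0]).

    run() ->
        Term = {<<1,2,3,4>>, {write_projection, public, {fake_projection, 1, a}}},
        %% One iteration = encode with term_to_binary(), decode with binary_to_term().
        DoIt = fun() -> _ = binary_to_term(term_to_binary(Term)) end,
        XX = lists:seq(1, 30*1000),
        erlang:garbage_collect(),
        {Micros, ok} = timer:tc(fun() -> [_ = DoIt() || _ <- XX], ok end),
        io:format("BIF round trip: ~p us for ~p iterations~n",
                  [Micros, length(XX)]),
        ok.

With the patch applied, running the test module under EUnit (for example,
eunit:test(machi_flu1_test)) should print the " speed factor=NN.NN " figure
quoted in the subject line; the PB side of the comparison needs the machi_pb
code generated from machi.proto, so it only runs inside the full project.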