diff --git a/c_src/bdberl_drv.c b/c_src/bdberl_drv.c index e8310bb..0c9292a 100644 --- a/c_src/bdberl_drv.c +++ b/c_src/bdberl_drv.c @@ -37,6 +37,7 @@ static void do_async_truncate(void* arg); static void do_async_stat(void* arg); static void do_async_lock_stat(void* arg); static void do_async_log_stat(void* arg); +static void do_async_memp_stat(void* arg); static int add_dbref(PortData* data, int dbref); static int del_dbref(PortData* data, int dbref); @@ -926,6 +927,45 @@ static int bdberl_drv_control(ErlDrvData handle, unsigned int cmd, int rc = G_DB_ENV->log_stat_print(G_DB_ENV, flags); RETURN_INT(rc, outbuf); } + case CMD_MEMP_STAT: + { + FAIL_IF_ASYNC_PENDING(d, outbuf); + + // Inbuf is <<Flags:32>> + // If the working buffer is large enough, copy the data to put/get into it. Otherwise, realloc + // until it is large enough + if (d->work_buffer_sz < inbuf_sz) + { + d->work_buffer = driver_realloc(d->work_buffer, inbuf_sz); + d->work_buffer_sz = inbuf_sz; + } + + // Copy the payload into place + memcpy(d->work_buffer, inbuf, inbuf_sz); + d->work_buffer_offset = inbuf_sz; + + // Mark the port as busy and then schedule the appropriate async operation + d->async_op = cmd; + d->async_pool = G_TPOOL_GENERAL; + bdberl_tpool_run(d->async_pool, &do_async_memp_stat, d, 0, &d->async_job); + + // Let caller know that the operation is in progress + // Outbuf is: <<0:32>> + RETURN_INT(0, outbuf); + } + case CMD_MEMP_STAT_PRINT: + { + FAIL_IF_ASYNC_PENDING(d, outbuf); + + // Inbuf is << Flags:32 >> + unsigned int flags = UNPACK_INT(inbuf, 0); + + // Outbuf is <<Rc:32>> + // Run the command on the VM thread - this is for debugging only, + // any real monitoring will use the async memp_stat + int rc = G_DB_ENV->memp_stat_print(G_DB_ENV, flags); + RETURN_INT(rc, outbuf); + } } *outbuf = 0; return 0; @@ -1503,6 +1543,104 @@ static void async_cleanup_and_send_log_stats(PortData* d, DB_LOG_STAT *lsp) driver_send_term(port, pid, response, sizeof(response) / sizeof(response[0])); } +static void 
send_mpool_fstat(ErlDrvPort port, ErlDrvTermData pid, DB_MPOOL_FSTAT *fsp) +{ + char *name = fsp->file_name ? fsp->file_name : ""; + int name_len = strlen(name); + ErlDrvTermData response[] = { + ERL_DRV_ATOM, driver_mk_atom("fstat"), + // Start of list + ERL_DRV_ATOM, driver_mk_atom("name"), + ERL_DRV_STRING, (ErlDrvTermData) name, name_len, + ERL_DRV_TUPLE, 2, + ST_STATS_TUPLE(fsp, map), /* Pages from mapped files. */ + ST_STATS_TUPLE(fsp, cache_hit), /* Pages found in the cache. */ + ST_STATS_TUPLE(fsp, cache_miss), /* Pages not found in the cache. */ + ST_STATS_TUPLE(fsp, page_create), /* Pages created in the cache. */ + ST_STATS_TUPLE(fsp, page_in), /* Pages read in. */ + ST_STATS_TUPLE(fsp, page_out), /* Pages written out. */ + // End of list + ERL_DRV_NIL, + ERL_DRV_LIST, 7+1, + ERL_DRV_TUPLE, 2 + }; + driver_send_term(port, pid, response, sizeof(response) / sizeof(response[0])); +} + +static void async_cleanup_and_send_memp_stats(PortData* d, DB_MPOOL_STAT *gsp, + DB_MPOOL_FSTAT **fsp) +{ + // Save the port and pid references -- we need copies independent from the PortData + // structure. Once we release the port_lock after clearing the cmd, it's possible that + // the port could go away without waiting on us to finish. This is acceptable, but we need + // to be certain that there is no overlap of data between the two threads. driver_send_term + // is safe to use from a thread, even if the port you're sending from has already expired. + ErlDrvPort port = d->port; + ErlDrvTermData pid = d->port_owner; + async_cleanup(d); + + // First send the per-file stats + int i; + for (i = 0; fsp != NULL && fsp[i] != NULL; i++) + { + send_mpool_fstat(port, pid, fsp[i]); + } + + // Then send the global stats + ErlDrvTermData response[] = { + ERL_DRV_ATOM, driver_mk_atom("ok"), + // Start of list + ST_STATS_TUPLE(gsp, gbytes), /* Total cache size: GB. */ + ST_STATS_TUPLE(gsp, bytes), /* Total cache size: B. */ + ST_STATS_TUPLE(gsp, ncache), /* Number of cache regions. 
*/ + ST_STATS_TUPLE(gsp, max_ncache), /* Maximum number of regions. */ + ST_STATS_INT_TUPLE(gsp, mmapsize), /* Maximum file size for mmap. */ + ST_STATS_INT_TUPLE(gsp, maxopenfd), /* Maximum number of open fd's. */ + ST_STATS_INT_TUPLE(gsp, maxwrite), /* Maximum buffers to write. */ + ST_STATS_TUPLE(gsp, maxwrite_sleep), /* Sleep after writing max buffers. */ + ST_STATS_TUPLE(gsp, pages), /* Total number of pages. */ + ST_STATS_TUPLE(gsp, map), /* Pages from mapped files. */ + ST_STATS_TUPLE(gsp, cache_hit), /* Pages found in the cache. */ + ST_STATS_TUPLE(gsp, cache_miss), /* Pages not found in the cache. */ + ST_STATS_TUPLE(gsp, page_create), /* Pages created in the cache. */ + ST_STATS_TUPLE(gsp, page_in), /* Pages read in. */ + ST_STATS_TUPLE(gsp, page_out), /* Pages written out. */ + ST_STATS_TUPLE(gsp, ro_evict), /* Clean pages forced from the cache. */ + ST_STATS_TUPLE(gsp, rw_evict), /* Dirty pages forced from the cache. */ + ST_STATS_TUPLE(gsp, page_trickle), /* Pages written by memp_trickle. */ + ST_STATS_TUPLE(gsp, page_clean), /* Clean pages. */ + ST_STATS_TUPLE(gsp, page_dirty), /* Dirty pages. */ + ST_STATS_TUPLE(gsp, hash_buckets), /* Number of hash buckets. */ + ST_STATS_TUPLE(gsp, hash_searches), /* Total hash chain searches. */ + ST_STATS_TUPLE(gsp, hash_longest), /* Longest hash chain searched. */ + ST_STATS_TUPLE(gsp, hash_examined), /* Total hash entries searched. */ + ST_STATS_TUPLE(gsp, hash_nowait), /* Hash lock granted with nowait. */ + ST_STATS_TUPLE(gsp, hash_wait), /* Hash lock granted after wait. */ + ST_STATS_TUPLE(gsp, hash_max_nowait), /* Max hash lock granted with nowait. */ + ST_STATS_TUPLE(gsp, hash_max_wait), /* Max hash lock granted after wait. */ + ST_STATS_TUPLE(gsp, region_nowait), /* Region lock granted with nowait. */ + ST_STATS_TUPLE(gsp, region_wait), /* Region lock granted after wait. */ + ST_STATS_TUPLE(gsp, mvcc_frozen), /* Buffers frozen. */ + ST_STATS_TUPLE(gsp, mvcc_thawed), /* Buffers thawed. 
*/ + ST_STATS_TUPLE(gsp, mvcc_freed), /* Frozen buffers freed. */ + ST_STATS_TUPLE(gsp, alloc), /* Number of page allocations. */ + ST_STATS_TUPLE(gsp, alloc_buckets), /* Buckets checked during allocation. */ + ST_STATS_TUPLE(gsp, alloc_max_buckets), /* Max checked during allocation. */ + ST_STATS_TUPLE(gsp, alloc_pages), /* Pages checked during allocation. */ + ST_STATS_TUPLE(gsp, alloc_max_pages), /* Max checked during allocation. */ + ST_STATS_TUPLE(gsp, io_wait), /* Thread waited on buffer I/O. */ + ST_STATS_TUPLE(gsp, regsize), /* Region size. */ + + // End of list + ERL_DRV_NIL, + ERL_DRV_LIST, 40+1, + ERL_DRV_TUPLE, 2 + }; + driver_send_term(port, pid, response, sizeof(response) / sizeof(response[0])); +} + + + static void do_async_put(void* arg) { // Payload is: <> @@ -1824,6 +1962,37 @@ static void do_async_log_stat(void* arg) } } +static void do_async_memp_stat(void* arg) +{ + // Payload is: <<Flags:32/native>> + PortData* d = (PortData*)arg; + + // Extract operation flags + unsigned flags = UNPACK_INT(d->work_buffer, 0); + + DB_MPOOL_STAT *gsp = NULL; + DB_MPOOL_FSTAT **fsp = NULL; + int rc = G_DB_ENV->memp_stat(G_DB_ENV, &gsp, &fsp, flags); + if (rc != 0 || gsp == NULL) + { + async_cleanup_and_send_rc(d, rc); + } + else + { + async_cleanup_and_send_memp_stats(d, gsp, fsp); + } + + // Finally, clean up the mpool stats + if (NULL != gsp) + { + free(gsp); + } + if (NULL != fsp) + { + free(fsp); + } +} + static void* zalloc(unsigned int size) { diff --git a/c_src/bdberl_drv.h b/c_src/bdberl_drv.h index e817c74..bea6194 100644 --- a/c_src/bdberl_drv.h +++ b/c_src/bdberl_drv.h @@ -53,6 +53,8 @@ static int bdberl_drv_control(ErlDrvData handle, unsigned int cmd, #define CMD_LOCK_STAT_PRINT 22 #define CMD_LOG_STAT 23 #define CMD_LOG_STAT_PRINT 24 +#define CMD_MEMP_STAT 25 +#define CMD_MEMP_STAT_PRINT 26 /** * Command status values diff --git a/include/bdberl.hrl b/include/bdberl.hrl index 6fe5137..a206c1a 100644 --- a/include/bdberl.hrl +++ b/include/bdberl.hrl @@ -30,6 +30,8 @@ 
-define(CMD_LOCK_STAT_PRINT, 22). -define(CMD_LOG_STAT, 23). -define(CMD_LOG_STAT_PRINT, 24). +-define(CMD_MEMP_STAT, 25). +-define(CMD_MEMP_STAT_PRINT,26). -define(DB_TYPE_BTREE, 1). -define(DB_TYPE_HASH, 2). diff --git a/src/bdberl.erl b/src/bdberl.erl index 520db82..974bd22 100644 --- a/src/bdberl.erl +++ b/src/bdberl.erl @@ -20,6 +20,8 @@ lock_stat_print/1, log_stat/1, log_stat_print/1, + memp_stat/1, + memp_stat_print/1, env_stat_print/1, transaction/1, transaction/2, transaction/3, put/3, put/4, @@ -1542,6 +1544,76 @@ log_stat_print(Opts) -> end. +%%-------------------------------------------------------------------- +%% @doc +%% Retrieve memory pool stats +%% +%% This function retrieves bdb mpool statistics +%% +%% === Options === +%% +%%
+%% <ul>
+%% <li>stat_clear</li>
+%% <p>Reset statistics after returning their values</p>
+%% </ul>
+%% +%% @spec memp_stat(Opts) -> {ok, GlobalStats, FileStats} | {error, Error} +%% where +%% Opts = [atom()] +%% +%% @end +%%-------------------------------------------------------------------- +-spec memp_stat(Opts :: db_flags()) -> + {ok, [{atom(), number()}], [[{atom(), any()}]]} | db_error(). + +memp_stat(Opts) -> + Flags = process_flags(Opts), + Cmd = <<Flags:32/native>>, + <<Result:32/signed-native>> = erlang:port_control(get_port(), ?CMD_MEMP_STAT, Cmd), + case decode_rc(Result) of + ok -> + recv_memp_stat([]); + Error -> + {error, Error} + end. + + +%%-------------------------------------------------------------------- +%% @doc +%% Print memory pool stats +%% +%% This function prints bdb mpool statistics to wherever +%% BDB messages are being sent +%% +%% === Options === +%% +%%
+%% <ul>
+%% <li>stat_all</li>
+%% <p>Display all available information.</p>
+%% <li>stat_clear</li>
+%% <p>Reset statistics after displaying their values.</p>
+%% <li>stat_memp_hash</li>
+%% </ul>
+%% +%% @spec memp_stat_print(Opts) -> ok | {error, Error} +%% where +%% Opts = [atom()] +%% +%% @end +%%-------------------------------------------------------------------- +-spec memp_stat_print(Opts :: db_flags()) -> + ok | db_error(). +memp_stat_print(Opts) -> + Flags = process_flags(Opts), + Cmd = <<Flags:32/native>>, + <<Result:32/signed-native>> = erlang:port_control(get_port(), ?CMD_MEMP_STAT_PRINT, Cmd), + case decode_rc(Result) of + ok -> ok; + Error -> {error, Error} + end. + + %%-------------------------------------------------------------------- %% @doc %% Print environment stats @@ -1700,6 +1772,7 @@ flag_value(Flag) -> stat_lock_lockers -> ?DB_STAT_LOCK_LOCKERS; stat_lock_objects -> ?DB_STAT_LOCK_OBJECTS; stat_lock_params -> ?DB_STAT_LOCK_PARAMS; + stat_memp_hash -> ?DB_STAT_MEMP_HASH; stat_subsystem -> ?DB_STAT_SUBSYSTEM; threaded -> ?DB_THREAD; truncate -> ?DB_TRUNCATE; @@ -1765,3 +1838,17 @@ split_bin(Delimiter, <>, ItemAcc, Acc) -> split_bin(Delimiter, Rest, <<>> ,[ItemAcc | Acc]); split_bin(Delimiter, <>, ItemAcc, Acc) -> split_bin(Delimiter, Rest, <>, Acc). + + +%% +%% Receive memory pool stats +%% +recv_memp_stat(Fstats) -> + receive + {error, Reason} -> + {error, decode_rc(Reason)}; + {fstat, Fstat} -> + recv_memp_stat([Fstat|Fstats]); + {ok, Stats} -> + {ok, Stats, Fstats} + end. diff --git a/test/bdberl_SUITE.erl b/test/bdberl_SUITE.erl index dbe546e..1c9dd78 100644 --- a/test/bdberl_SUITE.erl +++ b/test/bdberl_SUITE.erl @@ -36,7 +36,8 @@ all() -> hash_stat_should_report_on_success, stat_should_fail_on_bad_dbref, lock_stat_should_report_on_success, - log_stat_should_report_on_success]. + log_stat_should_report_on_success, + memp_stat_should_report_on_success]. dbconfig(Config) -> @@ -291,3 +292,9 @@ log_stat_should_report_on_success(_Config) -> %% Check a log stat that that probably won't change 264584 = proplists:get_value(magic, Stat), done. 
+ +memp_stat_should_report_on_success(_Config) -> + {ok, Gstat, Fstat} = bdberl:memp_stat([]), + true = is_list(Fstat), + true = is_list(Gstat), + done.