Compare commits

..

11 commits

Author SHA1 Message Date
Gregory Burd
169a9663b2 Set a lower bound for session_max to 1024 (which is a guess) or 2x
ring_size (another guess).
2013-03-12 10:29:33 -04:00
Gregory Burd
7a7350be42 Set session max based on ring size. 2013-03-11 20:34:21 -04:00
Gregory Burd
f5a5208b1b Filter only the wt specific parts, not all of app.config 2013-03-11 15:44:19 -04:00
Gregory Burd
db28e6b50d Renaming the repo to wt eventually. 2013-03-11 14:57:46 -04:00
Gregory Burd
0ec817ae9f Restructure the way we create/merge default configuration settings
with user supplied settings.  Move cache estimate to its own function.
2013-03-11 12:59:31 -04:00
Steve Vinoski
e6dc7a5936 fix new config problems, tests all pass again
The tests pass but there's still a "NIF upgrade" error reported at the end
of each test run. Will fix later.
2013-03-11 11:12:12 -04:00
Gregory Burd
83dfc9e396 Renaming from wterl to wt (less redundant and more meaningful name)
and working on a shared single cache for all vnodes.
2013-03-10 21:42:31 -04:00
Gregory Burd
57917f8bc6 Change the config to use a lsm tree rather than btree for tables.
Stop using direct_io as it forces operations to sync more often, slowing
things down, at the expense of double-buffering (this will use more RAM).
2013-03-08 13:33:12 -05:00
Gregory Burd
3cff357e49 Spelling mistake. 2013-03-08 10:29:54 -05:00
Gregory Burd
137088ff55 Start with a more rational default configuration. (Before you ask... the
answer is 'no').  So far there has been no effort to validate that these
settings are in fact the best for Riak/KV or CS data access patterns.  These
particular settings are, at best, an educated guess based on past experience,
the docs and reading about the benchmark the WiredTiger team published here:

https://github.com/wiredtiger/wiredtiger/wiki/YCSB-Mapkeeper-benchmark
2013-03-07 20:48:26 -05:00
Gregory Burd
a8bc4bf6c0 Pass configuration along when opening sessions. 2013-03-07 20:31:42 -05:00
33 changed files with 2073 additions and 6160 deletions

View file

@ -1,4 +0,0 @@
handle SIGPIPE nostop noprint pass
#b erl_nif.c:1203
#b sys/unix/erl_unix_sys_ddll.c:234

11
.gitignore vendored
View file

@ -1,14 +1,9 @@
*.beam
.eunit
ebin
priv/*.so
c_src/system
c_src/wiredtiger*/
c_src/wiredtiger-*/
c_src/*.o
c_src/bzip2-1.0.6
c_src/snappy-1.0.4
deps/
priv/wt
priv/*.so*
priv/*.dylib*
log/
deps
*~

View file

@ -1,2 +0,0 @@
-author('Steve Vinoski <steve@basho.com>').
-author('Gregory Burd <steve@basho.com>'). % greg@burd.me @gregburd

147
Makefile
View file

@ -1,126 +1,59 @@
# Copyright 2012 Erlware, LLC. All Rights Reserved.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
TARGET= wt
# Source: https://gist.github.com/ericbmerritt/5706091
REBAR= ./rebar
#REBAR= /usr/bin/env rebar
ERL= /usr/bin/env erl
DIALYZER= /usr/bin/env dialyzer
ERLFLAGS= -pa $(CURDIR)/.eunit -pa $(CURDIR)/ebin -pa $(CURDIR)/deps/*/ebin
.PHONY: plt analyze all deps compile get-deps clean
DEPS_PLT=$(CURDIR)/.deps_plt
DEPS=erts kernel stdlib
all: compile
# =============================================================================
# Verify that the programs we need to run are installed on this system
# =============================================================================
ERL = $(shell which erl)
deps: get-deps
ifeq ($(ERL),)
$(error "Erlang not available on this system")
endif
REBAR=$(shell which rebar)
ifeq ($(REBAR),)
$(error "Rebar not available on this system")
endif
DIALYZER=$(shell which dialyzer)
ifeq ($(DIALYZER),)
$(error "Dialyzer not available on this system")
endif
TYPER=$(shell which typer)
ifeq ($(TYPER),)
$(error "Typer not available on this system")
endif
.PHONY: all compile doc clean test dialyzer typer shell distclean pdf \
update-deps clean-common-test-data rebuild
all: deps compile
# =============================================================================
# Rules to build the system
# =============================================================================
deps:
$(REBAR) get-deps
$(REBAR) compile
get-deps:
@$(REBAR) get-deps
update-deps:
$(REBAR) update-deps
$(REBAR) compile
@$(REBAR) update-deps
c_src/wterl.o: c_src/async_nif.h
touch c_src/wterl.c
c_src/wt.o:
touch c_src/wt.c
ebin/app_helper.beam:
@echo You need to:
@echo cp ../riak/deps/riak_core/ebin/app_helper.beam ebin
@/bin/false
compile: c_src/wterl.o ebin/app_helper.beam
$(REBAR) skip_deps=true compile
doc:
$(REBAR) skip_deps=true doc
eunit: compile clean-common-test-data
$(REBAR) skip_deps=true eunit
test: compile eunit
$(DEPS_PLT):
@echo Building local plt at $(DEPS_PLT)
@echo
dialyzer --output_plt $(DEPS_PLT) --build_plt \
--apps $(DEPS) -r deps
dialyzer: $(DEPS_PLT)
$(DIALYZER) --fullpath --plt $(DEPS_PLT) -Wrace_conditions -r ./ebin
typer:
$(TYPER) --plt $(DEPS_PLT) -r ./src
xref:
$(REBAR) xref skip_deps=true
# You often want *rebuilt* rebar tests to be available to the shell you have to
# call eunit (to get the tests rebuilt). However, eunit runs the tests, which
probably fails (that's probably why you want them in the shell). This
# (prefixing the command with "-") runs eunit but tells make to ignore the
# result.
shell: deps compile
- @$(REBAR) skip_deps=true eunit
@$(ERL) $(ERLFLAGS)
pdf:
pandoc README.md -o README.pdf
compile: c_src/wt.o ebin/app_helper.beam
@$(REBAR) compile
clean:
- c_src/build_deps.sh clean
- rm -rf $(CURDIR)/test/*.beam
- rm -rf $(CURDIR)/logs
- rm -rf $(CURDIR)/ebin
$(REBAR) skip_deps=true clean
@$(REBAR) clean
distclean: clean
- rm -rf $(DEPS_PLT)
- rm -rvf $(CURDIR)/deps
test: eunit
rebuild: distclean deps compile escript dialyzer test
eunit: compile
@$(REBAR) eunit skip_deps=true
eunit_console:
@$(ERL) -pa .eunit deps/*/ebin
plt: compile
@$(DIALYZER) --build_plt --output_plt .$(TARGET).plt -pa deps/*/ebin --apps kernel stdlib
analyze: compile
$(DIALYZER) --plt .$(TARGET).plt -pa deps/*/ebin ebin
repl:
$(ERL) -pz deps/*/ebin -pa ebin
gdb-repl:
USE_GDB=1 $(ERL) -pz deps/*/ebin -pa ebin
eunit-repl:
$(ERL) -pa .eunit -pz deps/*/ebin -pz ebin -exec 'cd(".eunit").'
gdb-eunit-repl:
USE_GDB=1 $(ERL) -pa .eunit -pz deps/*/ebin -pz ebin -exec 'cd(".eunit").'

View file

@ -1,60 +1,32 @@
`wterl` is an Erlang interface to the WiredTiger database, and is written to
support a Riak storage backend that uses WiredTiger.
`wt` is an Erlang interface to the WiredTiger database, and is written
to support a Riak storage backend that uses WiredTiger.
This backend currently supports only key-value storage and retrieval.
Remaining work includes:
TODO:
* Find/fix any code marked "TODO:"
* Why do we see {error, {eperm, _}} result on wterl:cursor_close/1 during
fold_objects/4?
* Why do we see {error, {eperm, _}} result on wterl:cursor_close/1?
* Why do we see {error, {eperm, _}} result on wterl:cursor_next/1 during
is_empty/1?
* Why do we see {error, {eperm, _}} result on wterl:cursor_next_value/1
during status/1?
* Why do we see {error, {ebusy, _}} result on wterl:drop/2?
* Determine a better way to estimate the number of sessions we should
configure WT for at startup in riak_kv_wterl_backend:max_sessions/1.
* Make sure Erlang is optimizing for selective receive in async_nif_enqueue/3
because in the eLevelDB driver there is a comment: "This cannot be a separate
function. Code must be inline to trigger Erlang compiler's use of optimized
selective receive."
* Provide a way to configure the cursor options, right now they are
always "raw,overwrite".
* Add support for Riak/KV 2i indexes using the same design pattern
as eLevelDB (in a future version consider alternate schema)
* If an operation using a shared cursor results in a non-normal error
then it should be closed/discarded from the recycled pool
* Cache cursors based on hash(table/config) rather than just table.
* Finish NIF unload/reload functions and test.
* Test an upgrade, include a format/schema/WT change.
* When WT_PANIC is returned first try to unload/reload then driver
and reset all state, if that fails then exit gracefully.
* Currently the `riak_kv_wterl_backend` module is stored in this
* The `wt:session_create` function currently returns an error under
certain circumstances, so we currently ignore its return value.
* The `riak_kv_wt_backend` module is currently designed to rely on the
fact that it runs in just a single Erlang scheduler thread, which is
necessary because WiredTiger doesn't allow a session to be used
concurrently by different threads. If the KV node design ever changes to
involve concurrency across scheduler threads, this current design will no
longer work correctly.
* Currently the `riak_kv_wt_backend` module is stored in this
repository, but it really belongs in the `riak_kv` repository.
* wterl:truncate/5 can segv, and its tests are commented out
* Add async_nif and wterl NIF stats to the results provided by the
stats API
* Longer term ideas/changes to consider:
* More testing, especially pulse/qc
* Riak/KV integration
* Store 2i indexes in separate tables
* Store buckets, in separate tables and keep a <<bucket/key>> index
to ensure that folds across a vnode are easy
* Provide a drop bucket API call
* Support key expiry
* An ets API (like the LevelDB's lets project)
* Use mime-type to inform WT's schema for key value encoding
* Other use cases within Riak
* An AAE driver using WT
* An ability to store the ring file via WT
* There are currently some stability issues with WiredTiger that can
sometimes cause errors when restarting KV nodes with non-empty WiredTiger
storage.
Future support for secondary indexes requires WiredTiger features that are
under development but are not yet available.
Deploying
---------
You can deploy `wterl` into a Riak devrel cluster using the `enable-wterl`
You can deploy `wt` into a Riak devrel cluster using the `enable-wt`
script. Clone the `riak` repo, change your working directory to it, and
then execute the `enable-wterl` script. It adds `wterl` as a dependency,
then execute the `enable-wt` script. It adds `wt` as a dependency,
runs `make all devrel`, and then modifies the configuration settings of the
resulting dev nodes to use the WiredTiger storage backend.

View file

@ -1,609 +0,0 @@
/*
* async_nif: An async thread-pool layer for Erlang's NIF API
*
* Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
* Author: Gregory Burd <greg@basho.com> <greg@burd.me>
*
* This file is provided to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#ifndef __ASYNC_NIF_H__
#define __ASYNC_NIF_H__
#if defined(__cplusplus)
extern "C" {
#endif
#include <assert.h>
#include "queue.h"
#ifndef UNUSED
#define UNUSED(v) ((void)(v))
#endif
#define ASYNC_NIF_MAX_WORKERS 1024
#define ASYNC_NIF_MIN_WORKERS 2
#define ASYNC_NIF_WORKER_QUEUE_SIZE 8192
#define ASYNC_NIF_MAX_QUEUED_REQS ASYNC_NIF_WORKER_QUEUE_SIZE * ASYNC_NIF_MAX_WORKERS
/* Atoms (initialized in on_load) */
static ERL_NIF_TERM ATOM_EAGAIN;
static ERL_NIF_TERM ATOM_ENOMEM;
static ERL_NIF_TERM ATOM_ENQUEUED;
static ERL_NIF_TERM ATOM_ERROR;
static ERL_NIF_TERM ATOM_OK;
static ERL_NIF_TERM ATOM_SHUTDOWN;
struct async_nif_req_entry {
ERL_NIF_TERM ref;
ErlNifEnv *env;
ErlNifPid pid;
void *args;
void (*fn_work)(ErlNifEnv*, ERL_NIF_TERM, ErlNifPid*, unsigned int, void *);
void (*fn_post)(void *);
STAILQ_ENTRY(async_nif_req_entry) entries;
};
struct async_nif_work_queue {
unsigned int num_workers;
unsigned int depth;
ErlNifMutex *reqs_mutex;
ErlNifCond *reqs_cnd;
struct async_nif_work_queue *next;
STAILQ_HEAD(reqs, async_nif_req_entry) reqs;
};
struct async_nif_worker_entry {
ErlNifTid tid;
unsigned int worker_id;
struct async_nif_state *async_nif;
struct async_nif_work_queue *q;
SLIST_ENTRY(async_nif_worker_entry) entries;
};
struct async_nif_state {
unsigned int shutdown;
ErlNifMutex *we_mutex;
unsigned int we_active;
SLIST_HEAD(joining, async_nif_worker_entry) we_joining;
unsigned int num_queues;
unsigned int next_q;
STAILQ_HEAD(recycled_reqs, async_nif_req_entry) recycled_reqs;
unsigned int num_reqs;
ErlNifMutex *recycled_req_mutex;
struct async_nif_work_queue queues[];
};
#define ASYNC_NIF_DECL(decl, frame, pre_block, work_block, post_block) \
struct decl ## _args frame; \
static void fn_work_ ## decl (ErlNifEnv *env, ERL_NIF_TERM ref, ErlNifPid *pid, unsigned int worker_id, struct decl ## _args *args) { \
UNUSED(worker_id); \
DPRINTF("async_nif: calling \"%s\"", __func__); \
do work_block while(0); \
DPRINTF("async_nif: returned from \"%s\"", __func__); \
} \
static void fn_post_ ## decl (struct decl ## _args *args) { \
UNUSED(args); \
DPRINTF("async_nif: calling \"fn_post_%s\"", #decl); \
do post_block while(0); \
DPRINTF("async_nif: returned from \"fn_post_%s\"", #decl); \
} \
static ERL_NIF_TERM decl(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv_in[]) { \
struct decl ## _args on_stack_args; \
struct decl ## _args *args = &on_stack_args; \
struct decl ## _args *copy_of_args; \
struct async_nif_req_entry *req = NULL; \
unsigned int affinity = 0; \
ErlNifEnv *new_env = NULL; \
/* argv[0] is a ref used for selective recv */ \
const ERL_NIF_TERM *argv = argv_in + 1; \
argc -= 1; \
/* Note: !!! this assumes that the first element of priv_data is ours */ \
struct async_nif_state *async_nif = *(struct async_nif_state**)enif_priv_data(env); \
if (async_nif->shutdown) \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_SHUTDOWN); \
req = async_nif_reuse_req(async_nif); \
if (!req) \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_ENOMEM); \
new_env = req->env; \
DPRINTF("async_nif: calling \"%s\"", __func__); \
do pre_block while(0); \
DPRINTF("async_nif: returned from \"%s\"", __func__); \
copy_of_args = (struct decl ## _args *)malloc(sizeof(struct decl ## _args)); \
if (!copy_of_args) { \
fn_post_ ## decl (args); \
async_nif_recycle_req(req, async_nif); \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_ENOMEM); \
} \
memcpy(copy_of_args, args, sizeof(struct decl ## _args)); \
req->ref = enif_make_copy(new_env, argv_in[0]); \
enif_self(env, &req->pid); \
req->args = (void*)copy_of_args; \
req->fn_work = (void (*)(ErlNifEnv *, ERL_NIF_TERM, ErlNifPid*, unsigned int, void *))fn_work_ ## decl ; \
req->fn_post = (void (*)(void *))fn_post_ ## decl; \
int h = -1; \
if (affinity) \
h = ((unsigned int)affinity) % async_nif->num_queues; \
ERL_NIF_TERM reply = async_nif_enqueue_req(async_nif, req, h); \
if (!reply) { \
fn_post_ ## decl (args); \
async_nif_recycle_req(req, async_nif); \
free(copy_of_args); \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_EAGAIN); \
} \
return reply; \
}
#define ASYNC_NIF_INIT(name) \
static ErlNifMutex *name##_async_nif_coord = NULL;
#define ASYNC_NIF_LOAD(name, env, priv) do { \
if (!name##_async_nif_coord) \
name##_async_nif_coord = enif_mutex_create("nif_coord load"); \
enif_mutex_lock(name##_async_nif_coord); \
priv = async_nif_load(env); \
enif_mutex_unlock(name##_async_nif_coord); \
} while(0);
#define ASYNC_NIF_UNLOAD(name, env, priv) do { \
if (!name##_async_nif_coord) \
name##_async_nif_coord = enif_mutex_create("nif_coord unload"); \
enif_mutex_lock(name##_async_nif_coord); \
async_nif_unload(env, priv); \
enif_mutex_unlock(name##_async_nif_coord); \
enif_mutex_destroy(name##_async_nif_coord); \
name##_async_nif_coord = NULL; \
} while(0);
#define ASYNC_NIF_UPGRADE(name, env) do { \
if (!name##_async_nif_coord) \
name##_async_nif_coord = enif_mutex_create("nif_coord upgrade"); \
enif_mutex_lock(name##_async_nif_coord); \
async_nif_upgrade(env); \
enif_mutex_unlock(name##_async_nif_coord); \
} while(0);
#define ASYNC_NIF_RETURN_BADARG() do { \
async_nif_recycle_req(req, async_nif); \
return enif_make_badarg(env); \
} while(0);
#define ASYNC_NIF_WORK_ENV new_env
#define ASYNC_NIF_REPLY(msg) enif_send(NULL, pid, env, enif_make_tuple2(env, ref, msg))
/**
* Return a request structure from the recycled req queue if one exists,
* otherwise create one.
*/
struct async_nif_req_entry *
async_nif_reuse_req(struct async_nif_state *async_nif)
{
struct async_nif_req_entry *req = NULL;
ErlNifEnv *env = NULL;
enif_mutex_lock(async_nif->recycled_req_mutex);
if (STAILQ_EMPTY(&async_nif->recycled_reqs)) {
if (async_nif->num_reqs < ASYNC_NIF_MAX_QUEUED_REQS) {
req = malloc(sizeof(struct async_nif_req_entry));
if (req) {
memset(req, 0, sizeof(struct async_nif_req_entry));
env = enif_alloc_env();
if (env) {
req->env = env;
__sync_fetch_and_add(&async_nif->num_reqs, 1);
} else {
free(req);
req = NULL;
}
}
}
} else {
req = STAILQ_FIRST(&async_nif->recycled_reqs);
STAILQ_REMOVE(&async_nif->recycled_reqs, req, async_nif_req_entry, entries);
}
enif_mutex_unlock(async_nif->recycled_req_mutex);
return req;
}
/**
* Store the request for future re-use.
*
* req a request entry with an ErlNifEnv* which will be cleared
* before reuse, but not until then.
* async_nif a handle to our state so that we can find and use the mutex
*/
void
async_nif_recycle_req(struct async_nif_req_entry *req, struct async_nif_state *async_nif)
{
ErlNifEnv *env = NULL;
enif_mutex_lock(async_nif->recycled_req_mutex);
enif_clear_env(req->env);
env = req->env;
memset(req, 0, sizeof(struct async_nif_req_entry));
req->env = env;
STAILQ_INSERT_TAIL(&async_nif->recycled_reqs, req, entries);
enif_mutex_unlock(async_nif->recycled_req_mutex);
}
static void *async_nif_worker_fn(void *);
/**
* Start up a worker thread.
*/
static int
async_nif_start_worker(struct async_nif_state *async_nif, struct async_nif_work_queue *q)
{
struct async_nif_worker_entry *we;
if (0 == q)
return EINVAL;
enif_mutex_lock(async_nif->we_mutex);
we = SLIST_FIRST(&async_nif->we_joining);
while(we != NULL) {
struct async_nif_worker_entry *n = SLIST_NEXT(we, entries);
SLIST_REMOVE(&async_nif->we_joining, we, async_nif_worker_entry, entries);
void *exit_value = 0; /* We ignore the thread_join's exit value. */
enif_thread_join(we->tid, &exit_value);
free(we);
async_nif->we_active--;
we = n;
}
if (async_nif->we_active == ASYNC_NIF_MAX_WORKERS) {
enif_mutex_unlock(async_nif->we_mutex);
return EAGAIN;
}
we = malloc(sizeof(struct async_nif_worker_entry));
if (!we) {
enif_mutex_unlock(async_nif->we_mutex);
return ENOMEM;
}
memset(we, 0, sizeof(struct async_nif_worker_entry));
we->worker_id = async_nif->we_active++;
we->async_nif = async_nif;
we->q = q;
enif_mutex_unlock(async_nif->we_mutex);
return enif_thread_create(NULL,&we->tid, &async_nif_worker_fn, (void*)we, 0);
}
/**
* Enqueue a request for processing by a worker thread.
*
* Places the request into a work queue determined either by the
* provided affinity or by iterating through the available queues.
*/
static ERL_NIF_TERM
async_nif_enqueue_req(struct async_nif_state* async_nif, struct async_nif_req_entry *req, int hint)
{
/* Identify the most appropriate worker for this request. */
unsigned int i, last_qid, qid = 0;
struct async_nif_work_queue *q = NULL;
double avg_depth = 0.0;
/* Either we're choosing a queue based on some affinity/hinted value or we
need to select the next queue in the rotation and atomically update that
global value (next_q is shared across worker threads) . */
if (hint >= 0) {
qid = (unsigned int)hint;
} else {
do {
last_qid = __sync_fetch_and_add(&async_nif->next_q, 0);
qid = (last_qid + 1) % async_nif->num_queues;
} while (!__sync_bool_compare_and_swap(&async_nif->next_q, last_qid, qid));
}
/* Now we inspect and iterate across the set of queues trying to select one
that isn't too full or too slow. */
for (i = 0; i < async_nif->num_queues; i++) {
/* Compute the average queue depth not counting queues which are empty or
the queue we're considering right now. */
unsigned int j, n = 0;
for (j = 0; j < async_nif->num_queues; j++) {
if (j != qid && async_nif->queues[j].depth != 0) {
n++;
avg_depth += async_nif->queues[j].depth;
}
}
if (avg_depth) avg_depth /= n;
/* Lock this queue under consideration, then check for shutdown. While
we hold this lock either a) we're shutting down so exit now or b) this
queue will be valid until we release the lock. */
q = &async_nif->queues[qid];
enif_mutex_lock(q->reqs_mutex);
/* Try not to enqueue a request into a queue that isn't keeping up with
the request volume. */
if (q->depth <= avg_depth) break;
else {
enif_mutex_unlock(q->reqs_mutex);
qid = (qid + 1) % async_nif->num_queues;
}
}
/* If the for loop finished then we didn't find a suitable queue for this
request, meaning we're backed up so trigger eagain. Note that if we left
the loop in this way we hold no lock. */
if (i == async_nif->num_queues) return 0;
/* Add the request to the queue. */
STAILQ_INSERT_TAIL(&q->reqs, req, entries);
__sync_fetch_and_add(&q->depth, 1);
/* We've selected a queue for this new request now check to make sure there are
enough workers actively processing requests on this queue. */
while (q->depth > q->num_workers) {
switch(async_nif_start_worker(async_nif, q)) {
case EINVAL: case ENOMEM: default: return 0;
case EAGAIN: continue;
case 0: __sync_fetch_and_add(&q->num_workers, 1); goto done;
}
}done:;
/* Build the term before releasing the lock so as not to race on the use of
the req pointer (which will soon become invalid in another thread
performing the request). */
double pct_full = (double)avg_depth / (double)ASYNC_NIF_WORKER_QUEUE_SIZE;
ERL_NIF_TERM reply = enif_make_tuple2(req->env, ATOM_OK,
enif_make_tuple2(req->env, ATOM_ENQUEUED,
enif_make_double(req->env, pct_full)));
enif_cond_signal(q->reqs_cnd);
enif_mutex_unlock(q->reqs_mutex);
return reply;
}
/**
* Worker threads execute this function. Here each worker pulls requests off
* its respective queue, executes that work and continues doing that until
* it sees the shutdown flag is set, at which point it exits.
*/
static void *
async_nif_worker_fn(void *arg)
{
struct async_nif_worker_entry *we = (struct async_nif_worker_entry *)arg;
unsigned int worker_id = we->worker_id;
struct async_nif_state *async_nif = we->async_nif;
struct async_nif_work_queue *q = we->q;
struct async_nif_req_entry *req = NULL;
unsigned int tries = async_nif->num_queues;
for(;;) {
/* Examine the request queue, are there things to be done? */
enif_mutex_lock(q->reqs_mutex);
check_again_for_work:
if (async_nif->shutdown) {
enif_mutex_unlock(q->reqs_mutex);
break;
}
if (STAILQ_EMPTY(&q->reqs)) {
/* Queue is empty so we wait for more work to arrive. */
enif_mutex_unlock(q->reqs_mutex);
if (tries == 0 && q == we->q) {
if (q->num_workers > ASYNC_NIF_MIN_WORKERS) {
/* At this point we've tried to find/execute work on all queues
* and there are at least MIN_WORKERS on this queue, so we
* leave this loop (break), which leads to a thread exit/join. */
break;
} else {
enif_mutex_lock(q->reqs_mutex);
enif_cond_wait(q->reqs_cnd, q->reqs_mutex);
goto check_again_for_work;
}
} else {
tries--;
__sync_fetch_and_add(&q->num_workers, -1);
q = q->next;
__sync_fetch_and_add(&q->num_workers, 1);
continue; // try next queue
}
} else {
/* At this point the next req is ours to process and we hold the
reqs_mutex lock. Take the request off the queue. */
req = STAILQ_FIRST(&q->reqs);
STAILQ_REMOVE(&q->reqs, req, async_nif_req_entry, entries);
__sync_fetch_and_add(&q->depth, -1);
/* Wake up other worker thread watching this queue to help process work. */
enif_cond_signal(q->reqs_cnd);
enif_mutex_unlock(q->reqs_mutex);
/* Perform the work. */
req->fn_work(req->env, req->ref, &req->pid, worker_id, req->args);
/* Now call the post-work cleanup function. */
req->fn_post(req->args);
/* Clean up req for reuse. */
req->ref = 0;
req->fn_work = 0;
req->fn_post = 0;
free(req->args);
req->args = NULL;
async_nif_recycle_req(req, async_nif);
req = NULL;
}
}
enif_mutex_lock(async_nif->we_mutex);
SLIST_INSERT_HEAD(&async_nif->we_joining, we, entries);
enif_mutex_unlock(async_nif->we_mutex);
__sync_fetch_and_add(&q->num_workers, -1);
enif_thread_exit(0);
return 0;
}
static void
async_nif_unload(ErlNifEnv *env, struct async_nif_state *async_nif)
{
unsigned int i;
unsigned int num_queues = async_nif->num_queues;
struct async_nif_work_queue *q = NULL;
struct async_nif_req_entry *req = NULL;
struct async_nif_worker_entry *we = NULL;
UNUSED(env);
/* Signal the worker threads, stop what you're doing and exit. To ensure
that we don't race with the enqueue() process we first lock all the worker
queues, then set shutdown to true, then unlock. The enqueue function will
take the queue mutex, then test for shutdown condition, then enqueue only
if not shutting down. */
for (i = 0; i < num_queues; i++) {
q = &async_nif->queues[i];
enif_mutex_lock(q->reqs_mutex);
}
/* Set the shutdown flag so that worker threads will not continue
executing requests. */
async_nif->shutdown = 1;
for (i = 0; i < num_queues; i++) {
q = &async_nif->queues[i];
enif_mutex_unlock(q->reqs_mutex);
}
/* Join for the now exiting worker threads. */
while(async_nif->we_active > 0) {
for (i = 0; i < num_queues; i++)
enif_cond_broadcast(async_nif->queues[i].reqs_cnd);
enif_mutex_lock(async_nif->we_mutex);
we = SLIST_FIRST(&async_nif->we_joining);
while(we != NULL) {
struct async_nif_worker_entry *n = SLIST_NEXT(we, entries);
SLIST_REMOVE(&async_nif->we_joining, we, async_nif_worker_entry, entries);
void *exit_value = 0; /* We ignore the thread_join's exit value. */
enif_thread_join(we->tid, &exit_value);
free(we);
async_nif->we_active--;
we = n;
}
enif_mutex_unlock(async_nif->we_mutex);
}
enif_mutex_destroy(async_nif->we_mutex);
/* Cleanup in-flight requests, mutexes and conditions in each work queue. */
for (i = 0; i < num_queues; i++) {
q = &async_nif->queues[i];
/* Worker threads are stopped, now toss anything left in the queue. */
req = NULL;
req = STAILQ_FIRST(&q->reqs);
while(req != NULL) {
struct async_nif_req_entry *n = STAILQ_NEXT(req, entries);
enif_clear_env(req->env);
enif_send(NULL, &req->pid, req->env,
enif_make_tuple2(req->env, ATOM_ERROR, ATOM_SHUTDOWN));
req->fn_post(req->args);
enif_free_env(req->env);
free(req->args);
free(req);
req = n;
}
enif_mutex_destroy(q->reqs_mutex);
enif_cond_destroy(q->reqs_cnd);
}
/* Free any req structures sitting unused on the recycle queue. */
enif_mutex_lock(async_nif->recycled_req_mutex);
req = NULL;
req = STAILQ_FIRST(&async_nif->recycled_reqs);
while(req != NULL) {
struct async_nif_req_entry *n = STAILQ_NEXT(req, entries);
enif_free_env(req->env);
free(req);
req = n;
}
enif_mutex_unlock(async_nif->recycled_req_mutex);
enif_mutex_destroy(async_nif->recycled_req_mutex);
memset(async_nif, 0, sizeof(struct async_nif_state) + (sizeof(struct async_nif_work_queue) * async_nif->num_queues));
free(async_nif);
}
static void *
async_nif_load(ErlNifEnv *env)
{
static int has_init = 0;
unsigned int i, num_queues;
ErlNifSysInfo info;
struct async_nif_state *async_nif;
/* Don't init more than once. */
if (has_init) return 0;
else has_init = 1;
/* Init some static references to commonly used atoms. */
ATOM_EAGAIN = enif_make_atom(env, "eagain");
ATOM_ENOMEM = enif_make_atom(env, "enomem");
ATOM_ENQUEUED = enif_make_atom(env, "enqueued");
ATOM_ERROR = enif_make_atom(env, "error");
ATOM_OK = enif_make_atom(env, "ok");
ATOM_SHUTDOWN = enif_make_atom(env, "shutdown");
/* Find out how many schedulers there are. */
enif_system_info(&info, sizeof(ErlNifSysInfo));
/* Size the number of work queues according to schedulers. */
if (info.scheduler_threads > ASYNC_NIF_MAX_WORKERS / 2) {
num_queues = ASYNC_NIF_MAX_WORKERS / 2;
} else {
int remainder = ASYNC_NIF_MAX_WORKERS % info.scheduler_threads;
if (remainder != 0)
num_queues = info.scheduler_threads - remainder;
else
num_queues = info.scheduler_threads;
if (num_queues < 2)
num_queues = 2;
}
/* Init our portion of priv_data's module-specific state. */
async_nif = malloc(sizeof(struct async_nif_state) +
sizeof(struct async_nif_work_queue) * num_queues);
if (!async_nif)
return NULL;
memset(async_nif, 0, sizeof(struct async_nif_state) +
sizeof(struct async_nif_work_queue) * num_queues);
async_nif->num_queues = num_queues;
async_nif->we_active = 0;
async_nif->next_q = 0;
async_nif->shutdown = 0;
STAILQ_INIT(&async_nif->recycled_reqs);
async_nif->recycled_req_mutex = enif_mutex_create("recycled_req");
async_nif->we_mutex = enif_mutex_create("we");
SLIST_INIT(&async_nif->we_joining);
for (i = 0; i < async_nif->num_queues; i++) {
struct async_nif_work_queue *q = &async_nif->queues[i];
STAILQ_INIT(&q->reqs);
q->reqs_mutex = enif_mutex_create("reqs");
q->reqs_cnd = enif_cond_create("reqs");
q->next = &async_nif->queues[(i + 1) % num_queues];
}
return async_nif;
}
static void
async_nif_upgrade(ErlNifEnv *env)
{
UNUSED(env);
// TODO:
}
#if defined(__cplusplus)
}
#endif
#endif // __ASYNC_NIF_H__

View file

@ -1,152 +1,29 @@
#!/bin/bash
# /bin/sh on Solaris is not a POSIX compatible shell, but /usr/bin/ksh is.
if [ `uname -s` = 'SunOS' -a "${POSIX_SHELL}" != "true" ]; then
POSIX_SHELL="true"
export POSIX_SHELL
exec /usr/bin/ksh $0 $@
fi
unset POSIX_SHELL # clear it so if we invoke other scripts, they run as ksh as well
set -e
WT_REPO=http://github.com/wiredtiger/wiredtiger.git
WT_BRANCH=develop
WT_DIR=wiredtiger-`basename $WT_BRANCH`
#WT_REF="tags/1.6.6"
#WT_DIR=wiredtiger-`basename $WT_REF`
SNAPPY_VSN="1.0.4"
SNAPPY_DIR=snappy-$SNAPPY_VSN
WT_VSN=1.4.2
[ `basename $PWD` != "c_src" ] && cd c_src
export BASEDIR="$PWD"
which gmake 1>/dev/null 2>/dev/null && MAKE=gmake
MAKE=${MAKE:-make}
export CPPFLAGS="$CPPFLAGS -I $BASEDIR/system/include -O3 -mtune=native -march=native"
export LDFLAGS="$LDFLAGS -L$BASEDIR/system/lib"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$BASEDIR/system/lib:$LD_LIBRARY_PATH"
get_wt ()
{
if [ -d $BASEDIR/$WT_DIR/.git ]; then
(cd $BASEDIR/$WT_DIR && git pull -u) || exit 1
else
if [ "X$WT_REF" != "X" ]; then
git clone ${WT_REPO} ${WT_DIR} && \
(cd $BASEDIR/$WT_DIR && git checkout refs/$WT_REF || exit 1)
else
git clone ${WT_REPO} ${WT_DIR} && \
(cd $BASEDIR/$WT_DIR && git checkout -b $WT_BRANCH origin/$WT_BRANCH || exit 1)
fi
fi
[ -d $BASEDIR/$WT_DIR ] || (echo "Missing WiredTiger source directory" && exit 1)
(cd $BASEDIR/$WT_DIR
[ -e $BASEDIR/wiredtiger-build.patch ] && \
(patch -p1 --forward < $BASEDIR/wiredtiger-build.patch || exit 1 )
./autogen.sh || exit 1
[ -e $BASEDIR/$WT_DIR/build_posix/Makefile ] && \
(cd $BASEDIR/$WT_DIR/build_posix && $MAKE distclean)
wt_configure;
)
}
wt_configure ()
{
(cd $BASEDIR/$WT_DIR/build_posix
CFLAGS+=-g $BASEDIR/$WT_DIR/configure --with-pic \
--enable-snappy \
--prefix=${BASEDIR}/system || exit 1)
}
get_snappy ()
{
[ -e snappy-$SNAPPY_VSN.tar.gz ] || (echo "Missing Snappy ($SNAPPY_VSN) source package" && exit 1)
[ -d $BASEDIR/$SNAPPY_DIR ] || tar -xzf snappy-$SNAPPY_VSN.tar.gz
[ -e $BASEDIR/snappy-build.patch ] && \
(cd $BASEDIR/$SNAPPY_DIR
patch -p1 --forward < $BASEDIR/snappy-build.patch || exit 1)
(cd $BASEDIR/$SNAPPY_DIR
./configure --with-pic --prefix=$BASEDIR/system || exit 1)
}
get_deps ()
{
get_snappy;
get_wt;
}
update_deps ()
{
if [ -d $BASEDIR/$WT_DIR/.git ]; then
(cd $BASEDIR/$WT_DIR
if [ "X$WT_VSN" == "X" ]; then
git pull -u || exit 1
else
git checkout $WT_VSN || exit 1
fi
)
fi
}
build_wt ()
{
wt_configure;
(cd $BASEDIR/$WT_DIR/build_posix && $MAKE -j && $MAKE install)
}
build_snappy ()
{
(cd $BASEDIR/$SNAPPY_DIR && \
$MAKE -j && \
$MAKE install
)
}
BASEDIR="$PWD"
case "$1" in
clean)
[ -e $BASEDIR/$WT_DIR/build_posix/Makefile ] && \
(cd $BASEDIR/$WT_DIR/build_posix && $MAKE clean)
rm -rf system $SNAPPY_DIR
rm -f ${BASEDIR}/../priv/wt
rm -f ${BASEDIR}/../priv/libwiredtiger-*.so
rm -f ${BASEDIR}/../priv/libwiredtiger_*.so
rm -f ${BASEDIR}/../priv/libsnappy.so.*
;;
test)
(cd $BASEDIR/$WT_DIR && $MAKE -j test)
;;
update-deps)
update_deps;
;;
get-deps)
get_deps;
rm -rf system wiredtiger-$WT_VSN
;;
*)
shopt -s extglob
SUFFIXES='@(so|dylib)'
test -f system/lib/libwiredtiger.a && exit 0
# Build Snappy
[ -d $SNAPPY_DIR ] || get_snappy;
[ -d $BASEDIR/$SNAPPY_DIR ] || (echo "Missing Snappy source directory" && exit 1)
test -f $BASEDIR/system/lib/libsnappy.so.[0-9].[0-9].[0-9].* || build_snappy;
tar -xjf wiredtiger-$WT_VSN.tar.bz2
# --enable-snappy --enable-bzip2 --enable-lz4 \
(cd wiredtiger-$WT_VSN/build_posix && \
../configure --with-pic \
--prefix=$BASEDIR/system && \
make && make install)
# Build WiredTiger
[ -d $WT_DIR ] || get_wt;
[ -d $BASEDIR/$WT_DIR ] || (echo "Missing WiredTiger source directory" && exit 1)
test -f $BASEDIR/system/lib/libwiredtiger-[0-9].[0-9].[0-9].${SUFFIXES} -a \
-f $BASEDIR/system/lib/libwiredtiger_snappy.${SUFFIXES} || build_wt;
[ -d $BASEDIR/../priv ] || mkdir ${BASEDIR}/../priv
cp -p -P $BASEDIR/system/bin/wt ${BASEDIR}/../priv
cp -p -P ${BASEDIR}/system/lib/libwiredtiger-[0-9].[0-9].[0-9].${SUFFIXES} ${BASEDIR}/../priv
cp -p -P ${BASEDIR}/system/lib/libwiredtiger_snappy.${SUFFIXES} ${BASEDIR}/../priv
cp -p -P ${BASEDIR}/system/lib/libsnappy.${SUFFIXES}* ${BASEDIR}/../priv
;;
esac

View file

@ -1,66 +0,0 @@
/*
 * Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
 * Author: Gregory Burd <greg@basho.com> <greg@burd.me>
 *
 * This file is provided to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/* Shared debug-logging and portability macros for the NIF C sources. */

#ifndef __COMMON_H__
#define __COMMON_H__

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * DPRINTF(fmt, ...) / DPUTS(arg): debug output to stderr prefixed with
 * file and line.  Active only when DEBUG is set and the compiler supports
 * variadic macros (C99 or GNU C); otherwise both compile to no-ops.
 */
#if !(__STDC_VERSION__ >= 199901L || defined(__GNUC__))
# undef DEBUG
# define DEBUG 0
# define DPRINTF (void) /* Vararg macros may be unsupported */
# define DPUTS   (void) /* BUG FIX: DPUTS was left undefined in this branch */
#elif DEBUG
#include <stdio.h>
#include <stdarg.h>
#define DPRINTF(fmt, ...) \
    do { \
        fprintf(stderr, "%s:%d " fmt "\n", __FILE__, __LINE__, __VA_ARGS__); \
        fflush(stderr); \
    } while(0)
#define DPUTS(arg) DPRINTF("%s", arg)
#else
#define DPRINTF(fmt, ...) ((void) 0)
#define DPUTS(arg) ((void) 0)
#endif

/* Explicitly mark a parameter or variable as intentionally unused. */
#ifndef __UNUSED
#define __UNUSED(v) ((void)(v))
#endif

/* Assign v to n, then self-assign, to quiet "set but not used" warnings. */
#ifndef COMPQUIET
#define COMPQUIET(n, v) do { \
    (n) = (v); \
    (n) = (n); \
} while (0)
#endif

/* Cast a 64-bit value for printing with a %llu-style format.  On Apple
 * platforms the value is passed through unchanged (presumably the native
 * type already matches -- NOTE(review): confirm). */
#ifdef __APPLE__
#define PRIuint64(x) (x)
#else
#define PRIuint64(x) (unsigned long long)(x)
#endif

#if defined(__cplusplus)
}
#endif

#endif // __COMMON_H__

View file

@ -1,678 +0,0 @@
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
* $FreeBSD: src/sys/sys/queue.h,v 1.54 2002/08/05 05:18:43 alfred Exp $
*/
#ifndef _DB_QUEUE_H_
#define _DB_QUEUE_H_
#ifndef __offsetof
#define __offsetof(st, m) \
((size_t) ( (char *)&((st *)0)->m - (char *)0 ))
#endif
#ifndef __containerof
#define __containerof(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - __offsetof(type,member) );})
#endif
#if defined(__cplusplus)
extern "C" {
#endif
/*
* This file defines four types of data structures: singly-linked lists,
* singly-linked tail queues, lists and tail queues.
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A singly-linked tail queue is headed by a pair of pointers, one to the
* head of the list and the other to the tail of the list. The elements are
* singly linked for minimum space and pointer manipulation overhead at the
* expense of O(n) removal for arbitrary elements. New elements can be added
* to the list after an existing element, at the head of the list, or at the
* end of the list. Elements being removed from the head of the tail queue
* should use the explicit macro for this purpose for optimum efficiency.
* A singly-linked tail queue may only be traversed in the forward direction.
* Singly-linked tail queues are ideal for applications with large datasets
* and few or no removals or for implementing a FIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* For details on the use of these macros, see the queue(3) manual page.
*
*
* SLIST LIST STAILQ TAILQ
* _HEAD + + + +
* _HEAD_INITIALIZER + + + +
* _ENTRY + + + +
* _INIT + + + +
* _EMPTY + + + +
* _FIRST + + + +
* _NEXT + + + +
* _PREV - - - +
* _LAST - - + +
* _FOREACH + + + +
* _FOREACH_REVERSE - - - +
* _INSERT_HEAD + + + +
* _INSERT_BEFORE - + - +
* _INSERT_AFTER + + + +
* _INSERT_TAIL - - + +
* _CONCAT - - + +
* _REMOVE_HEAD + - + -
* _REMOVE + + + +
*
*/
/*
* XXX
* We #undef all of the macros because there are incompatible versions of this
* file and these macros on various systems. What makes the problem worse is
* they are included and/or defined by system include files which we may have
* already loaded into Berkeley DB before getting here. For example, FreeBSD's
* <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
* several of the LIST_XXX macros. Visual C.NET 7.0 also defines some of these
* same macros in Vc7\PlatformSDK\Include\WinNT.h. Make sure we use ours.
*/
#undef LIST_EMPTY
#undef LIST_ENTRY
#undef LIST_FIRST
#undef LIST_FOREACH
#undef LIST_HEAD
#undef LIST_HEAD_INITIALIZER
#undef LIST_INIT
#undef LIST_INSERT_AFTER
#undef LIST_INSERT_BEFORE
#undef LIST_INSERT_HEAD
#undef LIST_NEXT
#undef LIST_REMOVE
#undef QMD_TRACE_ELEM
#undef QMD_TRACE_HEAD
#undef QUEUE_MACRO_DEBUG
#undef SLIST_EMPTY
#undef SLIST_ENTRY
#undef SLIST_FIRST
#undef SLIST_FOREACH
#undef SLIST_FOREACH_PREVPTR
#undef SLIST_HEAD
#undef SLIST_HEAD_INITIALIZER
#undef SLIST_INIT
#undef SLIST_INSERT_AFTER
#undef SLIST_INSERT_HEAD
#undef SLIST_NEXT
#undef SLIST_REMOVE
#undef SLIST_REMOVE_HEAD
#undef STAILQ_CONCAT
#undef STAILQ_EMPTY
#undef STAILQ_ENTRY
#undef STAILQ_FIRST
#undef STAILQ_FOREACH
#undef STAILQ_HEAD
#undef STAILQ_HEAD_INITIALIZER
#undef STAILQ_INIT
#undef STAILQ_INSERT_AFTER
#undef STAILQ_INSERT_HEAD
#undef STAILQ_INSERT_TAIL
#undef STAILQ_LAST
#undef STAILQ_NEXT
#undef STAILQ_REMOVE
#undef STAILQ_REMOVE_HEAD
#undef STAILQ_REMOVE_HEAD_UNTIL
#undef TAILQ_CONCAT
#undef TAILQ_EMPTY
#undef TAILQ_ENTRY
#undef TAILQ_FIRST
#undef TAILQ_FOREACH
#undef TAILQ_FOREACH_REVERSE
#undef TAILQ_HEAD
#undef TAILQ_HEAD_INITIALIZER
#undef TAILQ_INIT
#undef TAILQ_INSERT_AFTER
#undef TAILQ_INSERT_BEFORE
#undef TAILQ_INSERT_HEAD
#undef TAILQ_INSERT_TAIL
#undef TAILQ_LAST
#undef TAILQ_NEXT
#undef TAILQ_PREV
#undef TAILQ_REMOVE
#undef TRACEBUF
#undef TRASHIT
#define QUEUE_MACRO_DEBUG 0
#if QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
char * lastfile;
int lastline;
char * prevfile;
int prevline;
};
#define TRACEBUF struct qm_trace trace;
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
#define QMD_TRACE_HEAD(head) do { \
(head)->trace.prevline = (head)->trace.lastline; \
(head)->trace.prevfile = (head)->trace.lastfile; \
(head)->trace.lastline = __LINE__; \
(head)->trace.lastfile = __FILE__; \
} while (0)
#define QMD_TRACE_ELEM(elem) do { \
(elem)->trace.prevline = (elem)->trace.lastline; \
(elem)->trace.prevfile = (elem)->trace.lastfile; \
(elem)->trace.lastline = __LINE__; \
(elem)->trace.lastfile = __FILE__; \
} while (0)
#else
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif /* QUEUE_MACRO_DEBUG */
/*
* Singly-linked List declarations.
*/
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List functions.
*/
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_FOREACH(var, head, field) \
for ((var) = SLIST_FIRST((head)); \
(var); \
(var) = SLIST_NEXT((var), field))
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
for ((varp) = &SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \
(varp) = &SLIST_NEXT((var), field))
#define SLIST_INIT(head) do { \
SLIST_FIRST((head)) = NULL; \
} while (0)
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
SLIST_NEXT((slistelm), field) = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
SLIST_FIRST((head)) = (elm); \
} while (0)
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_REMOVE(head, elm, type, field) do { \
if (SLIST_FIRST((head)) == (elm)) { \
SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = SLIST_FIRST((head)); \
while (SLIST_NEXT(curelm, field) != (elm)) \
curelm = SLIST_NEXT(curelm, field); \
SLIST_NEXT(curelm, field) = \
SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
} \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
/*
* Singly-linked Tail queue declarations.
*/
#define STAILQ_HEAD(name, type) \
struct name { \
struct type *stqh_first;/* first element */ \
struct type **stqh_last;/* addr of last next element */ \
}
#define STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \
struct { \
struct type *stqe_next; /* next element */ \
}
/*
* Singly-linked Tail queue functions.
*/
#define STAILQ_CONCAT(head1, head2) do { \
if (!STAILQ_EMPTY((head2))) { \
*(head1)->stqh_last = (head2)->stqh_first; \
(head1)->stqh_last = (head2)->stqh_last; \
STAILQ_INIT((head2)); \
} \
} while (0)
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define STAILQ_FIRST(head) ((head)->stqh_first)
#define STAILQ_FOREACH(var, head, field) \
for ((var) = STAILQ_FIRST((head)); \
(var); \
(var) = STAILQ_NEXT((var), field))
#define STAILQ_INIT(head) do { \
STAILQ_FIRST((head)) = NULL; \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_NEXT((tqelm), field) = (elm); \
} while (0)
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_FIRST((head)) = (elm); \
} while (0)
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
STAILQ_NEXT((elm), field) = NULL; \
*(head)->stqh_last = (elm); \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)
#define STAILQ_LAST(head, type, field) \
(STAILQ_EMPTY((head)) ? \
NULL : \
((struct type *) \
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
#define STAILQ_REMOVE(head, elm, type, field) do { \
if (STAILQ_FIRST((head)) == (elm)) { \
STAILQ_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = STAILQ_FIRST((head)); \
while (STAILQ_NEXT(curelm, field) != (elm)) \
curelm = STAILQ_NEXT(curelm, field); \
if ((STAILQ_NEXT(curelm, field) = \
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
} \
} while (0)
#define STAILQ_REMOVE_HEAD(head, field) do { \
if ((STAILQ_FIRST((head)) = \
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
/*
* List declarations.
*/
#define LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_FOREACH(var, head, field) \
for ((var) = LIST_FIRST((head)); \
(var); \
(var) = LIST_NEXT((var), field))
#define LIST_INIT(head) do { \
LIST_FIRST((head)) = NULL; \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
LIST_NEXT((listelm), field)->field.le_prev = \
&LIST_NEXT((elm), field); \
LIST_NEXT((listelm), field) = (elm); \
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
LIST_NEXT((elm), field) = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
LIST_FIRST((head)) = (elm); \
(elm)->field.le_prev = &LIST_FIRST((head)); \
} while (0)
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_REMOVE(elm, field) do { \
if (LIST_NEXT((elm), field) != NULL) \
LIST_NEXT((elm), field)->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
} while (0)
/*
* Tail queue declarations.
*/
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
TRACEBUF \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
TRACEBUF \
}
/*
* Tail queue functions.
*/
/*
 * Append every element of head2 onto the tail of head1, leaving head2
 * empty.  Both heads must link through the same entry `field`.
 *
 * BUG FIX: the first debug-trace call used the undefined name `head`;
 * it must trace `head1`.  Harmless while QUEUE_MACRO_DEBUG is 0 (the
 * trace macro expands to nothing) but a compile error once tracing is
 * enabled; fixed upstream in FreeBSD's sys/queue.h.
 */
#define TAILQ_CONCAT(head1, head2, field) do { \
	if (!TAILQ_EMPTY(head2)) { \
		*(head1)->tqh_last = (head2)->tqh_first; \
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
		(head1)->tqh_last = (head2)->tqh_last; \
		TAILQ_INIT((head2)); \
		QMD_TRACE_HEAD(head1); \
		QMD_TRACE_HEAD(head2); \
	} \
} while (0)
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST((head)); \
(var); \
(var) = TAILQ_NEXT((var), field))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = TAILQ_LAST((head), headname); \
(var); \
(var) = TAILQ_PREV((var), headname, field))
#define TAILQ_INIT(head) do { \
TAILQ_FIRST((head)) = NULL; \
(head)->tqh_last = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
TAILQ_NEXT((elm), field)->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else { \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
} \
TAILQ_NEXT((listelm), field) = (elm); \
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
TAILQ_NEXT((elm), field) = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
TAILQ_FIRST((head))->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
TAILQ_FIRST((head)) = (elm); \
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
TAILQ_NEXT((elm), field) = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_REMOVE(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field)) != NULL) \
TAILQ_NEXT((elm), field)->field.tqe_prev = \
(elm)->field.tqe_prev; \
else { \
(head)->tqh_last = (elm)->field.tqe_prev; \
QMD_TRACE_HEAD(head); \
} \
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
TRASHIT((elm)->field.tqe_next); \
TRASHIT((elm)->field.tqe_prev); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
/*
* Circular queue definitions.
*/
#define CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define CIRCLEQ_HEAD_INITIALIZER(head) \
{ (void *)&head, (void *)&head }
#define CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue functions.
*/
#define CIRCLEQ_INIT(head) do { \
(head)->cqh_first = (void *)(head); \
(head)->cqh_last = (void *)(head); \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == (void *)(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == (void *)(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = (void *)(head); \
if ((head)->cqh_last == (void *)(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.cqe_next = (void *)(head); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == (void *)(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == (void *)(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == (void *)(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
} while (/*CONSTCOND*/0)
#define CIRCLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->cqh_first); \
(var) != (const void *)(head); \
(var) = ((var)->field.cqe_next))
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for ((var) = ((head)->cqh_last); \
(var) != (const void *)(head); \
(var) = ((var)->field.cqe_prev))
/*
* Circular queue access methods.
*/
#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
(((elm)->field.cqe_next == (void *)(head)) \
? ((head)->cqh_first) \
: (elm->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
(((elm)->field.cqe_prev == (void *)(head)) \
? ((head)->cqh_last) \
: (elm->field.cqe_prev))
#if defined(__cplusplus)
}
#endif
#endif /* !_DB_QUEUE_H_ */

Binary file not shown.

Binary file not shown.

View file

@ -1,12 +0,0 @@
diff --git a/ext/compressors/snappy/Makefile.am b/ext/compressors/snappy/Makefile.am
index 6d78823..c423590 100644
--- a/ext/compressors/snappy/Makefile.am
+++ b/ext/compressors/snappy/Makefile.am
@@ -2,5 +2,6 @@ AM_CPPFLAGS = -I$(top_builddir) -I$(top_srcdir)/src/include
lib_LTLIBRARIES = libwiredtiger_snappy.la
libwiredtiger_snappy_la_SOURCES = snappy_compress.c
-libwiredtiger_snappy_la_LDFLAGS = -avoid-version -module
+libwiredtiger_snappy_la_CFLAGS = -I$(abs_top_builddir)/../../system/include
+libwiredtiger_snappy_la_LDFLAGS = -avoid-version -module -L$(abs_top_builddir)/../../system/lib -Wl,-rpath,lib/wterl-0.9.0/priv:lib/wterl/priv:priv
libwiredtiger_snappy_la_LIBADD = -lsnappy

672
c_src/wt.c Normal file
View file

@ -0,0 +1,672 @@
// -------------------------------------------------------------------
//
// wt: Erlang Wrapper for WiredTiger
//
// Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
//
// This file is provided to you under the Apache License,
// Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// -------------------------------------------------------------------
#include "erl_nif.h"
#include "erl_driver.h"
#include <stdio.h>
#include <string.h>
#include "wiredtiger.h"
/* NIF resource types for the three WiredTiger handle kinds; each wraps
 * exactly one raw WiredTiger pointer so the Erlang GC manages lifetime. */
static ErlNifResourceType* wt_conn_RESOURCE;
static ErlNifResourceType* wt_session_RESOURCE;
static ErlNifResourceType* wt_cursor_RESOURCE;
typedef struct {
WT_CONNECTION* conn;
} WtConnHandle;
typedef struct {
WT_SESSION* session;
} WtSessionHandle;
typedef struct {
WT_CURSOR* cursor;
} WtCursorHandle;
/* Fixed-size buffer for WiredTiger object names ("table:...", "file:..."). */
typedef char Uri[128]; // object names
// Atoms (initialized in on_load)
static ERL_NIF_TERM ATOM_ERROR;
static ERL_NIF_TERM ATOM_OK;
/* Callback type used by the shared cursor next/prev worker to shape the
 * return term (key, value, or key+value). */
typedef ERL_NIF_TERM (*CursorRetFun)(ErlNifEnv* env, WT_CURSOR* cursor, int rc);
// Prototypes
static ERL_NIF_TERM wt_conn_close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_conn_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_insert(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_key_ret(ErlNifEnv* env, WT_CURSOR *cursor, int rc);
static ERL_NIF_TERM wt_cursor_kv_ret(ErlNifEnv* env, WT_CURSOR *cursor, int rc);
static ERL_NIF_TERM wt_cursor_next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_next_key(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_next_value(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_np_worker(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[],
CursorRetFun cursor_ret_fun, int next);
static ERL_NIF_TERM wt_cursor_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_prev(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_prev_key(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_prev_value(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_remove(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_reset(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_search_near(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_search_worker(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[], int near);
static ERL_NIF_TERM wt_cursor_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_cursor_value_ret(ErlNifEnv* env, WT_CURSOR *cursor, int rc);
static ERL_NIF_TERM wt_session_checkpoint(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_delete(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_drop(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_get(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_put(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_rename(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_salvage(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_truncate(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_upgrade(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM wt_session_verify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
/* Erlang-visible NIF table: {erlang_name, arity, c_implementation}.
 * Names/arities must match the stubs in the companion .erl module. */
static ErlNifFunc nif_funcs[] =
{
{"conn_close", 1, wt_conn_close},
{"conn_open", 2, wt_conn_open},
{"cursor_close", 1, wt_cursor_close},
{"cursor_insert", 3, wt_cursor_insert},
{"cursor_next", 1, wt_cursor_next},
{"cursor_next_key", 1, wt_cursor_next_key},
{"cursor_next_value", 1, wt_cursor_next_value},
{"cursor_open", 2, wt_cursor_open},
{"cursor_prev", 1, wt_cursor_prev},
{"cursor_prev_key", 1, wt_cursor_prev_key},
{"cursor_prev_value", 1, wt_cursor_prev_value},
{"cursor_remove", 3, wt_cursor_remove},
{"cursor_reset", 1, wt_cursor_reset},
{"cursor_search", 2, wt_cursor_search},
{"cursor_search_near", 2, wt_cursor_search_near},
{"cursor_update", 3, wt_cursor_update},
{"session_checkpoint", 2, wt_session_checkpoint},
{"session_close", 1, wt_session_close},
{"session_create", 3, wt_session_create},
{"session_delete", 3, wt_session_delete},
{"session_drop", 3, wt_session_drop},
{"session_get", 3, wt_session_get},
{"session_open", 2, wt_session_open},
{"session_put", 4, wt_session_put},
{"session_rename", 4, wt_session_rename},
{"session_salvage", 3, wt_session_salvage},
{"session_truncate", 3, wt_session_truncate},
{"session_upgrade", 3, wt_session_upgrade},
{"session_verify", 3, wt_session_verify},
};
/* Map a WiredTiger return code to an Erlang term: the atom `not_found`
 * for WT_NOTFOUND, otherwise {error, ErrorString}. */
static inline ERL_NIF_TERM wt_strerror(ErlNifEnv* env, int rc)
{
return rc == WT_NOTFOUND ?
enif_make_atom(env, "not_found") :
enif_make_tuple2(env, ATOM_ERROR,
enif_make_string(env, wiredtiger_strerror(rc), ERL_NIF_LATIN1));
}
/* conn_open/2: open a WiredTiger connection.
 * argv[0] = home directory (charlist), argv[1] = config (binary).
 * Returns {ok, ConnHandle} | {error, Reason} | badarg.
 * NOTE(review): config.data is used as a C string; Erlang binaries are
 * not NUL-terminated, so callers presumably append a trailing \0 --
 * confirm against the Erlang side. */
static ERL_NIF_TERM wt_conn_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary config;
char homedir[4096];
if (enif_get_string(env, argv[0], homedir, sizeof homedir, ERL_NIF_LATIN1) &&
enif_inspect_binary(env, argv[1], &config))
{
WT_CONNECTION* conn;
int rc = wiredtiger_open(homedir, NULL, (const char*)config.data, &conn);
if (rc == 0)
{
/* Wrap the raw connection in a GC-managed resource; the release
 * below hands ownership of the refcount to the returned term. */
WtConnHandle* conn_handle = enif_alloc_resource(wt_conn_RESOURCE, sizeof(WtConnHandle));
conn_handle->conn = conn;
ERL_NIF_TERM result = enif_make_resource(env, conn_handle);
enif_release_resource(conn_handle);
return enif_make_tuple2(env, ATOM_OK, result);
}
else
{
return wt_strerror(env, rc);
}
}
return enif_make_badarg(env);
}
/* conn_close/1: close the WiredTiger connection held by the resource in
 * argv[0].  Returns ok on success, {error, Reason} on failure, or badarg
 * when argv[0] is not a connection resource. */
static ERL_NIF_TERM wt_conn_close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    WtConnHandle* handle;
    if (!enif_get_resource(env, argv[0], wt_conn_RESOURCE, (void**)&handle))
        return enif_make_badarg(env);
    WT_CONNECTION* c = handle->conn;
    int rc = c->close(c, NULL);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
/* Operation codes for the shared session worker below. */
#define WT_OP_CREATE 1
#define WT_OP_DROP 2
#define WT_OP_SALVAGE 3
#define WT_OP_TRUNCATE 4
#define WT_OP_UPGRADE 5
#define WT_OP_VERIFY 6
/* Common body for the session_create/drop/salvage/truncate/upgrade/verify
 * NIFs: argv[0] = session resource, argv[1] = uri (charlist),
 * argv[2] = config (binary).  Dispatches on `op` to the matching
 * WT_SESSION method.  Returns ok | {error, Reason} | badarg.
 * Note: an unknown op falls through to verify (the default case). */
static inline ERL_NIF_TERM wt_session_worker(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[], int op)
{
WtSessionHandle* session_handle;
if (enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&session_handle))
{
WT_SESSION* session = session_handle->session;
int rc;
Uri uri;
ErlNifBinary config;
if (enif_get_string(env, argv[1], uri, sizeof uri, ERL_NIF_LATIN1) &&
enif_inspect_binary(env, argv[2], &config))
{
switch (op)
{
case WT_OP_CREATE:
rc = session->create(session, uri, (const char*)config.data);
break;
case WT_OP_DROP:
rc = session->drop(session, uri, (const char*)config.data);
break;
case WT_OP_SALVAGE:
rc = session->salvage(session, uri, (const char*)config.data);
break;
case WT_OP_TRUNCATE:
// Ignore the cursor start/stop form of truncation for now,
// support only the full file truncation.
rc = session->truncate(session, uri, NULL, NULL, (const char*)config.data);
break;
case WT_OP_UPGRADE:
rc = session->upgrade(session, uri, (const char*)config.data);
break;
case WT_OP_VERIFY:
default:
rc = session->verify(session, uri, (const char*)config.data);
break;
}
return rc == 0 ? ATOM_OK : wt_strerror(env, rc);
}
}
return enif_make_badarg(env);
}
/* session_open/2: open a session on the connection in argv[0] with the
 * config binary in argv[1].  Returns {ok, SessionHandle} | {error, Reason}
 * | badarg. */
static ERL_NIF_TERM wt_session_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
WtConnHandle* conn_handle;
ErlNifBinary config;
if (enif_get_resource(env, argv[0], wt_conn_RESOURCE, (void**)&conn_handle) &&
enif_inspect_binary(env, argv[1], &config))
{
WT_CONNECTION* conn = conn_handle->conn;
WT_SESSION* session;
int rc = conn->open_session(conn, NULL, (const char *)config.data, &session);
if (rc == 0)
{
WtSessionHandle* session_handle =
enif_alloc_resource(wt_session_RESOURCE, sizeof(WtSessionHandle));
session_handle->session = session;
ERL_NIF_TERM result = enif_make_resource(env, session_handle);
/* Keep the connection resource alive while this session exists.
 * NOTE(review): the matching enif_release_resource is presumably in
 * the session resource destructor, not visible here -- confirm. */
enif_keep_resource(conn_handle);
enif_release_resource(session_handle);
return enif_make_tuple2(env, ATOM_OK, result);
}
else
{
return wt_strerror(env, rc);
}
}
return enif_make_badarg(env);
}
/* session_close/1: close the WiredTiger session held by the resource in
 * argv[0].  Returns ok on success, {error, Reason} on failure, or badarg
 * when argv[0] is not a session resource. */
static ERL_NIF_TERM wt_session_close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    WtSessionHandle* handle;
    if (!enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&handle))
        return enif_make_badarg(env);
    WT_SESSION* s = handle->session;
    int rc = s->close(s, NULL);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
/* Thin wrappers: each dispatches to the shared worker with its opcode. */
static ERL_NIF_TERM wt_session_create(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_CREATE;
    return wt_session_worker(env, argc, argv, op);
}
static ERL_NIF_TERM wt_session_drop(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_DROP;
    return wt_session_worker(env, argc, argv, op);
}
static ERL_NIF_TERM wt_session_rename(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Rename an object: argv = {session, OldUri, NewUri, ConfigBin}. */
    WtSessionHandle* handle = NULL;
    ErlNifBinary cfg;
    Uri from, to;
    if (!enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&handle) ||
        !enif_get_string(env, argv[1], from, sizeof from, ERL_NIF_LATIN1) ||
        !enif_get_string(env, argv[2], to, sizeof to, ERL_NIF_LATIN1) ||
        !enif_inspect_binary(env, argv[3], &cfg))
        return enif_make_badarg(env);
    WT_SESSION* s = handle->session;
    int rc = s->rename(s, from, to, (const char*)cfg.data);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
static ERL_NIF_TERM wt_session_salvage(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Repair a possibly-corrupted object; delegates to the shared worker. */
    const int op = WT_OP_SALVAGE;
    return wt_session_worker(env, argc, argv, op);
}
static ERL_NIF_TERM wt_session_checkpoint(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Write a checkpoint: argv = {session, ConfigBin}. */
    WtSessionHandle* handle = NULL;
    ErlNifBinary cfg;
    if (!enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&handle) ||
        !enif_inspect_binary(env, argv[1], &cfg))
        return enif_make_badarg(env);
    WT_SESSION* s = handle->session;
    int rc = s->checkpoint(s, (const char*)cfg.data);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
/* More opcode wrappers over the shared session worker. */
static ERL_NIF_TERM wt_session_truncate(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_TRUNCATE;
    return wt_session_worker(env, argc, argv, op);
}
static ERL_NIF_TERM wt_session_upgrade(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_UPGRADE;
    return wt_session_worker(env, argc, argv, op);
}
static ERL_NIF_TERM wt_session_verify(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_VERIFY;
    return wt_session_worker(env, argc, argv, op);
}
static ERL_NIF_TERM wt_session_delete(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Delete one key: argv = {session, Uri, KeyBin}.
     * Opens a short-lived raw cursor, removes the key, closes the cursor. */
    WtSessionHandle* handle = NULL;
    Uri uri;
    ErlNifBinary key;
    if (!enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&handle) ||
        !enif_get_string(env, argv[1], uri, sizeof uri, ERL_NIF_LATIN1) ||
        !enif_inspect_binary(env, argv[2], &key))
        return enif_make_badarg(env);

    WT_SESSION* s = handle->session;
    WT_CURSOR* c = NULL;
    int rc = s->open_cursor(s, uri, NULL, "raw", &c);
    if (rc != 0)
        return wt_strerror(env, rc);

    WT_ITEM k;
    k.data = key.data;
    k.size = key.size;
    c->set_key(c, &k);
    rc = c->remove(c);
    c->close(c); /* close even when remove failed */
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
static ERL_NIF_TERM wt_session_get(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /*
     * Look up one key: argv = {session, Uri, KeyBin}.
     * Returns {ok, ValueBin}, an error tuple from wt_strerror() (including
     * the not-found case), or badarg if the arguments don't parse.
     *
     * Fix: open the cursor with "raw" only.  "overwrite" changes insert
     * semantics and has no effect on this read-only path; dropping it also
     * makes this consistent with wt_session_delete above.
     */
    WtSessionHandle* session_handle;
    if (enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&session_handle))
    {
        Uri uri;
        ErlNifBinary key;
        if (enif_get_string(env, argv[1], uri, sizeof uri, ERL_NIF_LATIN1) &&
            enif_inspect_binary(env, argv[2], &key))
        {
            WT_SESSION* session = session_handle->session;
            WT_CURSOR* cursor;
            int rc = session->open_cursor(session, uri, NULL, "raw", &cursor);
            if (rc != 0)
                return wt_strerror(env, rc);

            WT_ITEM raw_key, raw_value;
            raw_key.data = key.data;
            raw_key.size = key.size;
            cursor->set_key(cursor, &raw_key);
            rc = cursor->search(cursor);
            if (rc == 0)
                rc = cursor->get_value(cursor, &raw_value);
            if (rc != 0)
            {
                cursor->close(cursor);
                return wt_strerror(env, rc);
            }
            /* Copy the value out before closing: the WT_ITEM memory is only
             * valid while the cursor remains positioned. */
            ERL_NIF_TERM value;
            unsigned char* bin = enif_make_new_binary(env, raw_value.size, &value);
            memcpy(bin, raw_value.data, raw_value.size);
            cursor->close(cursor);
            return enif_make_tuple2(env, ATOM_OK, value);
        }
    }
    return enif_make_badarg(env);
}
static ERL_NIF_TERM wt_session_put(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Upsert one key/value pair: argv = {session, Uri, KeyBin, ValueBin}. */
    WtSessionHandle* handle = NULL;
    Uri uri;
    ErlNifBinary key, val;
    if (!enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&handle) ||
        !enif_get_string(env, argv[1], uri, sizeof uri, ERL_NIF_LATIN1) ||
        !enif_inspect_binary(env, argv[2], &key) ||
        !enif_inspect_binary(env, argv[3], &val))
        return enif_make_badarg(env);

    WT_SESSION* s = handle->session;
    WT_CURSOR* c = NULL;
    /* "overwrite" makes insert behave as upsert; "raw" passes WT_ITEMs through. */
    int rc = s->open_cursor(s, uri, NULL, "overwrite,raw", &c);
    if (rc != 0)
        return wt_strerror(env, rc);

    WT_ITEM k, v;
    k.data = key.data;
    k.size = key.size;
    v.data = val.data;
    v.size = val.size;
    c->set_key(c, &k);
    c->set_value(c, &v);
    rc = c->insert(c);
    c->close(c);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
static ERL_NIF_TERM wt_cursor_open(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Open a standalone cursor: argv = {session, Uri}.
     * Returns {ok, CursorHandle}; the handle pins its session resource. */
    WtSessionHandle* session_handle = NULL;
    Uri uri;
    if (!enif_get_resource(env, argv[0], wt_session_RESOURCE, (void**)&session_handle) ||
        !enif_get_string(env, argv[1], uri, sizeof uri, ERL_NIF_LATIN1))
        return enif_make_badarg(env);

    WT_SESSION* s = session_handle->session;
    WT_CURSOR* cursor = NULL;
    int rc = s->open_cursor(s, uri, NULL, "overwrite,raw", &cursor);
    if (rc != 0)
        return wt_strerror(env, rc);

    WtCursorHandle* handle =
        enif_alloc_resource(wt_cursor_RESOURCE, sizeof(WtCursorHandle));
    handle->cursor = cursor;
    ERL_NIF_TERM term = enif_make_resource(env, handle);
    /* Keep the session alive for as long as this cursor handle exists. */
    enif_keep_resource(session_handle);
    enif_release_resource(handle);
    return enif_make_tuple2(env, ATOM_OK, term);
}
static ERL_NIF_TERM wt_cursor_close(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Close a cursor (argv[0]: cursor resource). */
    WtCursorHandle* handle = NULL;
    if (!enif_get_resource(env, argv[0], wt_cursor_RESOURCE, (void**)&handle))
        return enif_make_badarg(env);
    WT_CURSOR* c = handle->cursor;
    int rc = c->close(c);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
static ERL_NIF_TERM wt_cursor_key_ret(ErlNifEnv* env, WT_CURSOR *cursor, int rc)
{
    /* Shape a cursor result as {ok, KeyBin}, or an error tuple when either
     * the preceding operation (rc) or get_key itself failed. */
    if (rc != 0)
        return wt_strerror(env, rc);
    WT_ITEM item;
    rc = cursor->get_key(cursor, &item);
    if (rc != 0)
        return wt_strerror(env, rc);
    ERL_NIF_TERM key;
    unsigned char* out = enif_make_new_binary(env, item.size, &key);
    memcpy(out, item.data, item.size);
    return enif_make_tuple2(env, ATOM_OK, key);
}
static ERL_NIF_TERM wt_cursor_kv_ret(ErlNifEnv* env, WT_CURSOR *cursor, int rc)
{
    /* Shape a cursor result as {ok, KeyBin, ValueBin}, or an error tuple
     * when the preceding operation (rc), get_key, or get_value failed. */
    if (rc != 0)
        return wt_strerror(env, rc);
    WT_ITEM k, v;
    rc = cursor->get_key(cursor, &k);
    if (rc != 0)
        return wt_strerror(env, rc);
    rc = cursor->get_value(cursor, &v);
    if (rc != 0)
        return wt_strerror(env, rc);
    ERL_NIF_TERM key, value;
    memcpy(enif_make_new_binary(env, k.size, &key), k.data, k.size);
    memcpy(enif_make_new_binary(env, v.size, &value), v.data, v.size);
    return enif_make_tuple3(env, ATOM_OK, key, value);
}
static ERL_NIF_TERM wt_cursor_value_ret(ErlNifEnv* env, WT_CURSOR *cursor, int rc)
{
    /* Shape a cursor result as {ok, ValueBin}, or an error tuple when
     * either the preceding operation (rc) or get_value failed. */
    if (rc != 0)
        return wt_strerror(env, rc);
    WT_ITEM item;
    rc = cursor->get_value(cursor, &item);
    if (rc != 0)
        return wt_strerror(env, rc);
    ERL_NIF_TERM value;
    unsigned char* out = enif_make_new_binary(env, item.size, &value);
    memcpy(out, item.data, item.size);
    return enif_make_tuple2(env, ATOM_OK, value);
}
static ERL_NIF_TERM wt_cursor_np_worker(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[],
                                        CursorRetFun cursor_ret, int prev)
{
    /* Advance the cursor one step (prev != 0 moves backwards) and format
     * the result with the supplied return-shaping callback. */
    WtCursorHandle* handle = NULL;
    if (!enif_get_resource(env, argv[0], wt_cursor_RESOURCE, (void**)&handle))
        return enif_make_badarg(env);
    WT_CURSOR* c = handle->cursor;
    int rc = (prev == 0) ? c->next(c) : c->prev(c);
    return cursor_ret(env, c, rc);
}
/* Iteration wrappers: forward/backward movement returning the key/value
 * pair, just the key, or just the value. */
static ERL_NIF_TERM wt_cursor_next(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return wt_cursor_np_worker(env, argc, argv, wt_cursor_kv_ret, /*prev=*/0);
}
static ERL_NIF_TERM wt_cursor_next_key(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return wt_cursor_np_worker(env, argc, argv, wt_cursor_key_ret, /*prev=*/0);
}
static ERL_NIF_TERM wt_cursor_next_value(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return wt_cursor_np_worker(env, argc, argv, wt_cursor_value_ret, /*prev=*/0);
}
static ERL_NIF_TERM wt_cursor_prev(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return wt_cursor_np_worker(env, argc, argv, wt_cursor_kv_ret, /*prev=*/1);
}
static ERL_NIF_TERM wt_cursor_prev_key(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return wt_cursor_np_worker(env, argc, argv, wt_cursor_key_ret, /*prev=*/1);
}
static ERL_NIF_TERM wt_cursor_prev_value(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return wt_cursor_np_worker(env, argc, argv, wt_cursor_value_ret, /*prev=*/1);
}
static ERL_NIF_TERM wt_cursor_search_worker(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[], int near)
{
    /* Position the cursor at (or, when near == 1, close to) KeyBin and
     * return {ok, ValueBin}.  The less/greater/equal comparison reported
     * by search_near is deliberately discarded. */
    WtCursorHandle* handle = NULL;
    ErlNifBinary key;
    if (!enif_get_resource(env, argv[0], wt_cursor_RESOURCE, (void**)&handle) ||
        !enif_inspect_binary(env, argv[1], &key))
        return enif_make_badarg(env);

    WT_CURSOR* c = handle->cursor;
    WT_ITEM k;
    k.data = key.data;
    k.size = key.size;
    c->set_key(c, &k);

    int rc;
    if (near == 1)
    {
        int exact = 0; /* intentionally unused */
        rc = c->search_near(c, &exact);
    }
    else
    {
        rc = c->search(c);
    }
    return wt_cursor_value_ret(env, c, rc);
}
/* Exact-match and nearest-match search entry points. */
static ERL_NIF_TERM wt_cursor_search(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int near = 0;
    return wt_cursor_search_worker(env, argc, argv, near);
}
static ERL_NIF_TERM wt_cursor_search_near(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int near = 1;
    return wt_cursor_search_worker(env, argc, argv, near);
}
static ERL_NIF_TERM wt_cursor_reset(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Reset the cursor so it no longer holds a position or resources. */
    WtCursorHandle* handle = NULL;
    if (!enif_get_resource(env, argv[0], wt_cursor_RESOURCE, (void**)&handle))
        return enif_make_badarg(env);
    WT_CURSOR* c = handle->cursor;
    int rc = c->reset(c);
    return (rc == 0) ? ATOM_OK : wt_strerror(env, rc);
}
#define WT_OP_CURSOR_INSERT 1
#define WT_OP_CURSOR_UPDATE 2
#define WT_OP_CURSOR_REMOVE 3
static inline ERL_NIF_TERM wt_cursor_data_op(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[], int op)
{
    /*
     * Shared body for cursor insert/update/remove:
     * argv = {cursor, KeyBin, ValueBin}.
     *
     * Fix: the value was previously copied into the cursor before the
     * switch, even for REMOVE, which only consults the key — the value is
     * now set only in the insert/update arms.  ValueBin is still validated
     * for remove so the NIF's arity and badarg behavior are unchanged.
     */
    WtCursorHandle *cursor_handle;
    if (enif_get_resource(env, argv[0], wt_cursor_RESOURCE, (void**)&cursor_handle))
    {
        ErlNifBinary key, value;
        int rc;
        if (enif_inspect_binary(env, argv[1], &key) && enif_inspect_binary(env, argv[2], &value))
        {
            WT_CURSOR* cursor = cursor_handle->cursor;
            WT_ITEM raw_key, raw_value;
            raw_key.data = key.data;
            raw_key.size = key.size;
            cursor->set_key(cursor, &raw_key);
            switch (op)
            {
            case WT_OP_CURSOR_INSERT:
                raw_value.data = value.data;
                raw_value.size = value.size;
                cursor->set_value(cursor, &raw_value);
                rc = cursor->insert(cursor);
                break;
            case WT_OP_CURSOR_UPDATE:
                raw_value.data = value.data;
                raw_value.size = value.size;
                cursor->set_value(cursor, &raw_value);
                rc = cursor->update(cursor);
                break;
            case WT_OP_CURSOR_REMOVE:
            default:
                rc = cursor->remove(cursor);
                break;
            }
            return rc == 0 ? ATOM_OK : wt_strerror(env, rc);
        }
    }
    return enif_make_badarg(env);
}
/* Data-op wrappers over the shared cursor worker. */
static ERL_NIF_TERM wt_cursor_insert(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_CURSOR_INSERT;
    return wt_cursor_data_op(env, argc, argv, op);
}
static ERL_NIF_TERM wt_cursor_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_CURSOR_UPDATE;
    return wt_cursor_data_op(env, argc, argv, op);
}
static ERL_NIF_TERM wt_cursor_remove(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    const int op = WT_OP_CURSOR_REMOVE;
    return wt_cursor_data_op(env, argc, argv, op);
}
static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    /*
     * NIF load hook: register the three resource types and intern the reply
     * atoms.  Returns 0 on success; non-zero makes the runtime reject the
     * load.
     *
     * Fix: enif_open_resource_type() returns NULL on failure (e.g. a
     * conflicting takeover); that was previously ignored, leaving NULL
     * resource types for every later enif_get_resource() call.
     */
    ErlNifResourceFlags flags = ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER;
    wt_conn_RESOURCE = enif_open_resource_type(env, NULL, "wt_conn_resource", NULL, flags, NULL);
    wt_session_RESOURCE = enif_open_resource_type(env, NULL, "wt_session_resource", NULL, flags, NULL);
    wt_cursor_RESOURCE = enif_open_resource_type(env, NULL, "wt_cursor_resource", NULL, flags, NULL);
    if (wt_conn_RESOURCE == NULL || wt_session_RESOURCE == NULL || wt_cursor_RESOURCE == NULL)
        return -1;
    ATOM_ERROR = enif_make_atom(env, "error");
    ATOM_OK = enif_make_atom(env, "ok");
    return 0;
}
ERL_NIF_INIT(wt, nif_funcs, &on_load, NULL, NULL, NULL);

File diff suppressed because it is too large Load diff

View file

@ -1,12 +1,12 @@
#!/bin/sh
# This script adds wterl to a riak github repo. Run it in the riak repo
# This script adds wt to a riak github repo. Run it in the riak repo
# directory.
#
# First it adds wterl, then runs "make all devrel" and then enables the
# wterl storage backend in the resulting dev nodes.
# First it adds wt, then runs "make all devrel" and then enables the
# wt storage backend in the resulting dev nodes.
#
# This script is intended to be temporary. Once wterl is made into a proper
# This script is intended to be temporary. Once wt is made into a proper
# riak citizen, this script will no longer be needed.
set -e
@ -35,17 +35,17 @@ fi
rebar get-deps
file=./deps/riak_kv/src/riak_kv.app.src
if ! grep -q wterl $file ; then
if ! grep -q hanoidb $file && ! grep -q wt $file ; then
echo
echo "Modifying $file, saving the original as ${file}.orig ..."
perl -i.orig -pe '/\bos_mon,/ && print qq( wterl,\n)' $file
perl -i.orig -pe '/\bos_mon,/ && print qq( wt,\n)' $file
fi
file=./deps/riak_kv/rebar.config
if ! grep -q wterl $file ; then
if ! grep -q wt $file ; then
echo
echo "Modifying $file, saving the original as ${file}.orig ..."
perl -i.orig -pe '/\bsext\b/ && print qq( {wterl, ".*", {git, "git\@github.com:basho-labs/wterl.git", "master"}},\n)' $file
perl -i.orig -pe '/\bsext\b/ && print qq( {wt, ".*", {git, "git\@github.com:basho-labs/wterl.git", "gsb-shared-cache-across-vnodes"}},\n)' $file
fi
rebar get-deps
@ -55,6 +55,6 @@ make all stagedevrel
echo
echo 'Modifying all dev/dev*/etc/app.config files, saving originals with .orig suffix...'
perl -i.orig -ne 'if (/\bstorage_backend,/) { s/(storage_backend, )[^\}]+/\1riak_kv_wterl_backend/; print } elsif (/\{eleveldb,/) { $eleveldb++; print } elsif ($eleveldb && /^\s+\]\},/) { $eleveldb = 0; print; print qq(\n {wterl, [\n {data_root, "./data/wt"}\n ]},\n\n) } else { print }' dev/dev*/etc/app.config
perl -i.orig -ne 'if (/\bstorage_backend,/) { s/(storage_backend, )[^\}]+/\1riak_kv_wiredtiger_backend/; print } elsif (/\{eleveldb,/) { $eleveldb++; print } elsif ($eleveldb && /^\s+\]\},/) { $eleveldb = 0; print; print qq(\n {wt, [\n {data_root, "./data/wt"}\n ]},\n\n) } else { print }' dev/dev*/etc/app.config
exit 0

View file

@ -1,6 +0,0 @@
%%%% This is the WiredTiger section
%% @doc wiredtiger data_root
{mapping, "wiredtiger.data_root", "wterl.data_root", [
{default, "{{platform_data_dir}}/wiredtiger"}
]}.

View file

@ -1,46 +1,45 @@
%%-*- mode: erlang -*-
%% ex: ft=erlang ts=4 sw=4 et
{require_otp_vsn, "R1[567]"}.
{require_otp_vsn, "R1[456]"}.
{cover_enabled, true}.
%{eunit_opts, [verbose, {report, {eunit_surefire, [{dir, "."}]}}]}.
{eunit_opts, [verbose, {report, {eunit_surefire, [{dir, "."}]}}]}.
{erl_opts, [
%native, {hipe, [o3,verbose]}, inline, {inline_size, 1024},
{parse_transform, lager_transform},
{erl_opts, [%{d,'DEBUG',true},
debug_info,
{d,'DEBUG',true},
strict_validation,
fail_on_warning,
%warn_missing_spec,
warn_bif_clash,
warn_deprecated_function,
warn_unused_vars,
warn_export_all,
warn_shadow_vars,
warn_unused_import,
warn_unused_function,
warn_bif_clash,
warn_unused_record,
warn_deprecated_function,
warn_obsolete_guard,
warn_export_vars,
warn_exported_vars,
warn_obsolete_guard,
warn_shadow_vars,
warn_untyped_record,
warn_unused_function,
warn_unused_import,
warn_unused_record,
warn_unused_vars
{parse_transform, lager_transform}
%warn_missing_spec,
%strict_validation
]}.
{xref_checks, [undefined_function_calls, deprecated_function_calls]}.
{xref_checks, [undefined_function_calls]}.
{deps, [
{lager, "2.*", {git, "git://github.com/basho/lager", {branch, "master"}}}
{lager, "1.2.2", {git, "git://github.com/basho/lager", {tag, "1.2.2"}}}
]}.
{port_specs, [{"priv/wterl.so", ["c_src/*.c"]}]}.
{port_specs, [{"priv/wt.so", ["c_src/*.c"]}]}.
{port_env, [
{"DRV_CFLAGS", "$DRV_CFLAGS -O3 -mtune=native -march=native -fPIC -Wall -Wextra -Werror -I c_src/system/include"},
{"DRV_LDFLAGS", "$DRV_LDFLAGS -Wl,-rpath,lib/wterl/priv:lib/wterl-0.9.0/priv:priv -Lc_src/system/lib -lwiredtiger"}
{"DRV_CFLAGS", "$DRV_CFLAGS -Werror -I c_src/system/include"},
{"DRV_LDFLAGS", "$DRV_LDFLAGS c_src/system/lib/libwiredtiger.a"}
]}.
{pre_hooks, [{compile, "c_src/build_deps.sh compile"}]}.
{pre_hooks, [{compile, "c_src/build_deps.sh"}]}.
{post_hooks, [{clean, "c_src/build_deps.sh clean"}]}.

View file

@ -1,54 +0,0 @@
%% -------------------------------------------------------------------
%%
%% async_nif: An async thread-pool layer for Erlang's NIF API
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%% Author: Gregory Burd <greg@basho.com> <greg@burd.me>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-define(ASYNC_NIF_CALL(Fun, Args),
F = fun(F, T) ->
R = erlang:make_ref(),
case erlang:apply(Fun, [R|Args]) of
{ok, {enqueued, PctBusy}} ->
if
PctBusy > 0.25 andalso PctBusy =< 1.0 ->
erlang:bump_reductions(erlang:trunc(2000 * PctBusy));
true ->
ok
end,
receive
{R, {error, shutdown}=Error} ->
%% Work unit was queued, but not executed.
Error;
{R, {error, _Reason}=Error} ->
%% Work unit returned an error.
Error;
{R, Reply} ->
Reply
end;
{error, eagain} ->
case T of
3 -> not_found;
_ -> F(F, T + 1)
end;
Other ->
Other
end
end,
F(F, 1)).

View file

@ -0,0 +1,537 @@
%% -------------------------------------------------------------------
%%
%% riak_kv_wiredtiger_backend: Use WiredTiger for Riak/KV storage
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_kv_wiredtiger_backend).
-behavior(temp_riak_kv_backend).
-author('Steve Vinoski <steve@basho.com>').
%% KV Backend API
-export([api_version/0,
capabilities/1,
capabilities/2,
start/2,
stop/1,
get/3,
put/5,
delete/4,
drop/1,
fold_buckets/4,
fold_keys/4,
fold_objects/4,
is_empty/1,
status/1,
callback/3]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
-define(API_VERSION, 1).
%% TODO: for when this backend supports 2i
%%-define(CAPABILITIES, [async_fold, indexes]).
-define(CAPABILITIES, [async_fold]).
-record(state, {conn :: wt:connection(), %% There is one shared connection
session :: wt:session(), %% but one session per partition
table :: string(),
partition :: integer()}).
-type state() :: #state{}.
-type config() :: [{atom(), term()}].
%% ===================================================================
%% Public API
%% ===================================================================
%% @doc Return the major version of the
%% current API.
-spec api_version() -> {ok, integer()}.
api_version() ->
{ok, ?API_VERSION}.
%% @doc Return the capabilities of the backend.
-spec capabilities(state()) -> {ok, [atom()]}.
capabilities(_) ->
{ok, ?CAPABILITIES}.
%% @doc Return the capabilities of the backend.
-spec capabilities(riak_object:bucket(), state()) -> {ok, [atom()]}.
capabilities(_, _) ->
{ok, ?CAPABILITIES}.
%% @doc Start the WiredTiger backend
-spec start(integer(), config()) -> {ok, state()} | {error, term()}.
start(Partition, Config) ->
%% Get the data root directory
case app_helper:get_prop_or_env(data_root, Config, wt) of
undefined ->
lager:error("Failed to startup WiredTiger: data_root is not set"),
{error, data_root_unset};
DataRoot ->
AppStarted =
case application:start(wt) of
ok ->
ok;
{error, {already_started, _}} ->
ok;
{error, Reason} ->
lager:error("Failed to start WiredTiger: ~p", [Reason]),
{error, Reason}
end,
case AppStarted of
ok ->
CacheSize = size_cache(64, Config),
SessionMax =
case app_helper:get_env(riak_core, ring_creation_size) of
undefined -> 1024;
RingSize ->
case RingSize < 512 of
true -> 1024;
false -> RingSize * 2
end
end,
WTConfig =
case proplists:lookup(wt, Config) of
none ->
case application:get_env(wt) of
undefined ->
[];
WTSectionOfEnv ->
WTSectionOfEnv
end;
WTSectionOfConfig ->
WTSectionOfConfig
end,
ConnectionOpts = lists:merge([
WTConfig,
[{create, true},
{logging, true},
{transactional, true},
{session_max, SessionMax},
{shared_cache, [{chunk, "64MB"},
{min, "1GB"},
{name, "wt-cache"},
{size, CacheSize}]},
{sync, false}
%% {verbose,
%% ["block", "shared_cache", "ckpt", "evict",
%% "evictserver", "fileops", "hazard", "lsm",
%% "mutex", "read", "readserver", "reconcile",
%% "salvage", "verify", "write"]}
]]),
ok = filelib:ensure_dir(filename:join(DataRoot, "x")),
case wt_conn:open(DataRoot, ConnectionOpts) of
{ok, ConnRef} ->
Table = "lsm:wt" ++ integer_to_list(Partition),
{ok, SRef} = wt:session_open(ConnRef),
SessionOpts =
[%TODO {block_compressor, "snappy"},
{internal_page_max, "128K"},
{leaf_page_max, "256K"},
{lsm_chunk_size, "256MB"},
{lsm_bloom_config, [{leaf_page_max, "16MB"}]} ],
ok = wt:session_create(SRef, Table, wt:config_to_bin(SessionOpts)),
{ok, #state{conn=ConnRef,
table=Table,
session=SRef,
partition=Partition}};
{error, ConnReason}=ConnError ->
lager:error("Failed to start WiredTiger storage backend: ~p\n",
[ConnReason]),
ConnError
end;
Error ->
Error
end
end.
%% @doc Stop the WiredTiger backend
-spec stop(state()) -> ok.
stop(#state{conn=ConnRef, session=SRef}) ->
ok = wt:session_close(SRef),
wt_conn:close(ConnRef).
%% @doc Retrieve an object from the WiredTiger backend
-spec get(riak_object:bucket(), riak_object:key(), state()) ->
{ok, any(), state()} |
{ok, not_found, state()} |
{error, term(), state()}.
get(Bucket, Key, #state{table=Table, session=SRef}=State) ->
WTKey = to_object_key(Bucket, Key),
case wt:session_get(SRef, Table, WTKey) of
{ok, Value} ->
{ok, Value, State};
not_found ->
{error, not_found, State};
{error, Reason} ->
{error, Reason, State}
end.
%% @doc Insert an object into the WiredTiger backend.
%% NOTE: The WiredTiger backend does not currently support
%% secondary indexing and the _IndexSpecs parameter
%% is ignored.
-type index_spec() :: {add, Index, SecondaryKey} | {remove, Index, SecondaryKey}.
-spec put(riak_object:bucket(), riak_object:key(), [index_spec()], binary(), state()) ->
{ok, state()} |
{error, term(), state()}.
put(Bucket, PrimaryKey, _IndexSpecs, Val, #state{table=Table, session=SRef}=State) ->
WTKey = to_object_key(Bucket, PrimaryKey),
case wt:session_put(SRef, Table, WTKey, Val) of
ok ->
{ok, State};
{error, Reason} ->
{error, Reason, State}
end.
%% @doc Delete an object from the WiredTiger backend
%% NOTE: The WiredTiger backend does not currently support
%% secondary indexing and the _IndexSpecs parameter
%% is ignored.
-spec delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) ->
{ok, state()} |
{error, term(), state()}.
delete(Bucket, Key, _IndexSpecs, #state{table=Table, session=SRef}=State) ->
WTKey = to_object_key(Bucket, Key),
case wt:session_delete(SRef, Table, WTKey) of
ok ->
{ok, State};
{error, Reason} ->
{error, Reason, State}
end.
%% @doc Fold over all the buckets
-spec fold_buckets(riak_kv_backend:fold_buckets_fun(),
any(),
[],
state()) -> {ok, any()} | {async, fun()}.
fold_buckets(FoldBucketsFun, Acc, Opts, #state{conn=ConnRef, table=Table}) ->
FoldFun = fold_buckets_fun(FoldBucketsFun),
BucketFolder =
fun() ->
{ok, SRef} = wt:session_open(ConnRef),
{ok, Cursor} = wt:cursor_open(SRef, Table),
try
{FoldResult, _} =
wt:fold_keys(Cursor, FoldFun, {Acc, []}),
FoldResult
catch
{break, AccFinal} ->
AccFinal
after
ok = wt:cursor_close(Cursor),
ok = wt:session_close(SRef)
end
end,
case lists:member(async_fold, Opts) of
true ->
{async, BucketFolder};
false ->
{ok, BucketFolder()}
end.
%% @doc Fold over all the keys for one or all buckets.
-spec fold_keys(riak_kv_backend:fold_keys_fun(),
any(),
[{atom(), term()}],
state()) -> {ok, term()} | {async, fun()}.
fold_keys(FoldKeysFun, Acc, Opts, #state{conn=ConnRef, table=Table}) ->
%% Figure out how we should limit the fold: by bucket, by
%% secondary index, or neither (fold across everything.)
Bucket = lists:keyfind(bucket, 1, Opts),
Index = lists:keyfind(index, 1, Opts),
%% Multiple limiters may exist. Take the most specific limiter.
Limiter =
if Index /= false -> Index;
Bucket /= false -> Bucket;
true -> undefined
end,
%% Set up the fold...
FoldFun = fold_keys_fun(FoldKeysFun, Limiter),
KeyFolder =
fun() ->
{ok, SRef} = wt:session_open(ConnRef),
{ok, Cursor} = wt:cursor_open(SRef, Table),
try
wt:fold_keys(Cursor, FoldFun, Acc)
catch
{break, AccFinal} ->
AccFinal
after
ok = wt:cursor_close(Cursor),
ok = wt:session_close(SRef)
end
end,
case lists:member(async_fold, Opts) of
true ->
{async, KeyFolder};
false ->
{ok, KeyFolder()}
end.
%% @doc Fold over all the objects for one or all buckets.
-spec fold_objects(riak_kv_backend:fold_objects_fun(),
any(),
[{atom(), term()}],
state()) -> {ok, any()} | {async, fun()}.
fold_objects(FoldObjectsFun, Acc, Opts, #state{conn=ConnRef, table=Table}) ->
Bucket = proplists:get_value(bucket, Opts),
FoldFun = fold_objects_fun(FoldObjectsFun, Bucket),
ObjectFolder =
fun() ->
{ok, SRef} = wt:session_open(ConnRef),
{ok, Cursor} = wt:cursor_open(SRef, Table),
try
wt:fold(Cursor, FoldFun, Acc)
catch
{break, AccFinal} ->
AccFinal
after
ok = wt:cursor_close(Cursor),
ok = wt:session_close(SRef)
end
end,
case lists:member(async_fold, Opts) of
true ->
{async, ObjectFolder};
false ->
{ok, ObjectFolder()}
end.
%% @doc Delete all objects from this WiredTiger backend
-spec drop(state()) -> {ok, state()} | {error, term(), state()}.
drop(#state{table=Table, session=SRef}=State) ->
case wt:session_truncate(SRef, Table) of
ok ->
{ok, State};
Error ->
{error, Error, State}
end.
%% @doc Returns true if this WiredTiger backend contains any
%% non-tombstone values; otherwise returns false.
-spec is_empty(state()) -> boolean().
is_empty(#state{table=Table, session=SRef}) ->
{ok, Cursor} = wt:cursor_open(SRef, Table),
try
not_found =:= wt:cursor_next(Cursor)
after
ok = wt:cursor_close(Cursor)
end.
%% @doc Get the status information for this WiredTiger backend
-spec status(state()) -> [{atom(), term()}].
status(#state{table=Table, session=SRef}) ->
{ok, Cursor} = wt:cursor_open(SRef, "statistics:"++Table),
try
Stats = fetch_status(Cursor),
[{stats, Stats}]
after
ok = wt:cursor_close(Cursor)
end.
%% @doc Register an asynchronous callback
-spec callback(reference(), any(), state()) -> {ok, state()}.
callback(_Ref, _Msg, State) ->
{ok, State}.
%% ===================================================================
%% Internal functions
%% ===================================================================
%% @private
%% Return a function to fold over the buckets on this backend
fold_buckets_fun(FoldBucketsFun) ->
fun(BK, {Acc, LastBucket}) ->
case from_object_key(BK) of
{LastBucket, _} ->
{Acc, LastBucket};
{Bucket, _} ->
{FoldBucketsFun(Bucket, Acc), Bucket};
_ ->
throw({break, Acc})
end
end.
%% @private
%% Return a function to fold over keys on this backend
fold_keys_fun(FoldKeysFun, undefined) ->
%% Fold across everything...
fun(StorageKey, Acc) ->
case from_object_key(StorageKey) of
{Bucket, Key} ->
FoldKeysFun(Bucket, Key, Acc);
_ ->
throw({break, Acc})
end
end;
fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}) ->
%% Fold across a specific bucket...
fun(StorageKey, Acc) ->
case from_object_key(StorageKey) of
{Bucket, Key} when Bucket == FilterBucket ->
FoldKeysFun(Bucket, Key, Acc);
_ ->
throw({break, Acc})
end
end;
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, <<"$bucket">>, _}}) ->
%% 2I exact match query on special $bucket field...
fold_keys_fun(FoldKeysFun, {bucket, FilterBucket});
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, FilterField, FilterTerm}}) ->
%% Rewrite 2I exact match query as a range...
NewQuery = {range, FilterField, FilterTerm, FilterTerm},
fold_keys_fun(FoldKeysFun, {index, FilterBucket, NewQuery});
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, <<"$key">>, StartKey, EndKey}}) ->
%% 2I range query on special $key field...
fun(StorageKey, Acc) ->
case from_object_key(StorageKey) of
{Bucket, Key} when FilterBucket == Bucket,
StartKey =< Key,
EndKey >= Key ->
FoldKeysFun(Bucket, Key, Acc);
_ ->
throw({break, Acc})
end
end;
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, FilterField, StartTerm, EndTerm}}) ->
%% 2I range query...
fun(StorageKey, Acc) ->
case from_index_key(StorageKey) of
{Bucket, Key, Field, Term} when FilterBucket == Bucket,
FilterField == Field,
StartTerm =< Term,
EndTerm >= Term ->
FoldKeysFun(Bucket, Key, Acc);
_ ->
throw({break, Acc})
end
end;
fold_keys_fun(_FoldKeysFun, Other) ->
throw({unknown_limiter, Other}).
%% @private
%% Return a function to fold over the objects on this backend
fold_objects_fun(FoldObjectsFun, FilterBucket) ->
%% 2I does not support fold objects at this time, so this is much
%% simpler than fold_keys_fun.
fun({StorageKey, Value}, Acc) ->
case from_object_key(StorageKey) of
{Bucket, Key} when FilterBucket == undefined;
Bucket == FilterBucket ->
FoldObjectsFun(Bucket, Key, Value, Acc);
_ ->
throw({break, Acc})
end
end.
to_object_key(Bucket, Key) ->
sext:encode({o, Bucket, Key}).
from_object_key(LKey) ->
case sext:decode(LKey) of
{o, Bucket, Key} ->
{Bucket, Key};
_ ->
undefined
end.
from_index_key(LKey) ->
case sext:decode(LKey) of
{i, Bucket, Field, Term, Key} ->
{Bucket, Key, Field, Term};
_ ->
undefined
end.
%% @private
%% Return all status from WiredTiger statistics cursor
fetch_status(Cursor) ->
fetch_status(Cursor, wt:cursor_next_value(Cursor), []).
fetch_status(_Cursor, not_found, Acc) ->
lists:reverse(Acc);
fetch_status(Cursor, {ok, Stat}, Acc) ->
[What,Val|_] = [binary_to_list(B) || B <- binary:split(Stat, [<<0>>], [global])],
fetch_status(Cursor, wt:cursor_next_value(Cursor), [{What,Val}|Acc]).
size_cache(ChunkSize, Config) ->
case proplists:get_value(cache_size, Config) of
undefined ->
case application:get_env(wt, cache_size) of
{ok, Value} ->
Value;
_ ->
SizeEst = best_guess_at_a_reasonable_cache_size(ChunkSize),
%% lager:warning("Using estimated best cache size of ~p for WiredTiger backend.", [SizeEst]),
SizeEst
end;
Value ->
Value
end.
best_guess_at_a_reasonable_cache_size(ChunkSizeInMB) ->
RunningApps = application:which_applications(),
case proplists:is_defined(sasl, RunningApps) andalso
proplists:is_defined(os_mon, RunningApps) of
true ->
MemInfo = memsup:get_system_memory_data(),
AvailableRAM = proplists:get_value(system_total_memory, MemInfo),
FreeRAM = proplists:get_value(free_memory, MemInfo),
CurrentlyInUseByErlang = proplists:get_value(total, erlang:memory()),
OneThirdOfRemainingRAM = ((AvailableRAM - CurrentlyInUseByErlang) div 3),
Remainder = OneThirdOfRemainingRAM rem (ChunkSizeInMB * 1024 * 1024),
EstCacheSize = (OneThirdOfRemainingRAM - Remainder),
GuessedSize =
case EstCacheSize > FreeRAM of
true ->
FreeRAM - (FreeRAM rem (ChunkSizeInMB * 1024 * 1024));
_ ->
EstCacheSize
end,
case GuessedSize < 809238528 of
true -> "1GB";
false -> integer_to_list(GuessedSize div (1024 * 1024)) ++ "MB"
end;
false ->
"1GB"
end.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
simple_test_() ->
?assertCmd("rm -rf test/wt-backend"),
application:set_env(wt, data_root, "test/wt-backend"),
temp_riak_kv_backend:standard_test(?MODULE, []).
custom_config_test_() ->
?assertCmd("rm -rf test/wt-backend"),
application:set_env(wt, data_root, ""),
temp_riak_kv_backend:standard_test(?MODULE, [{data_root, "test/wt-backend"}]).
-endif.

View file

@ -1,624 +0,0 @@
%% -------------------------------------------------------------------
%%
%% riak_kv_wterl_backend: WiredTiger Driver for Riak
%%
%% Copyright (c) 2012-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(riak_kv_wterl_backend).
-behavior(temp_riak_kv_backend).
-compile([{parse_transform, lager_transform}]).
%% KV Backend API
-export([api_version/0,
capabilities/1,
capabilities/2,
start/2,
stop/1,
get/3,
put/5,
delete/4,
drop/1,
fold_buckets/4,
fold_keys/4,
fold_objects/4,
is_empty/1,
status/1,
callback/3]).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
-endif.
-define(API_VERSION, 1).
%% TODO: for when this backend supports 2i
%%-define(CAPABILITIES, [async_fold, indexes]).
-define(CAPABILITIES, [async_fold]).
-record(state, {table :: string(),
type :: string(),
connection :: wterl:connection()}).
-type state() :: #state{}.
-type config() :: [{atom(), term()}].
%% ===================================================================
%% Public API
%% ===================================================================
%% @doc Return the major version of the
%% current API.
-spec api_version() -> {ok, integer()}.
api_version() ->
    {ok, ?API_VERSION}.

%% @doc Return the capabilities of the backend.
-spec capabilities(state()) -> {ok, [atom()]}.
capabilities(_) ->
    {ok, ?CAPABILITIES}.

%% @doc Return the capabilities of the backend.
%% The bucket argument is ignored: capabilities do not vary per bucket.
-spec capabilities(riak_object:bucket(), state()) -> {ok, [atom()]}.
capabilities(_, _) ->
    {ok, ?CAPABILITIES}.
%% @doc Start the wterl backend for the given partition.  Ensures the
%% wterl application is running, normalizes the table type from config
%% ("lsm" is the default; "btree" is treated as a plain "table"), opens
%% a connection and creates the partition's table if needed.
-spec start(integer(), config()) -> {ok, state()} | {error, term()}.
start(Partition, Config) ->
    AppStart =
        case application:start(wterl) of
            ok ->
                ok;
            {error, {already_started, _}} ->
                ok;
            {error, Reason1} ->
                lager:error("Failed to start wterl: ~p", [Reason1]),
                {error, Reason1}
        end,
    %% NOTE(review): if the app failed to start, AppStart is {error,_}
    %% and the case below has no matching clause -- this raises a
    %% case_clause error instead of returning {error, Reason}.
    case AppStart of
        ok ->
            %% Normalize the requested storage type; anything unknown
            %% falls back to "lsm".
            Type =
                case wterl:config_value(type, Config, "lsm") of
                    {type, "lsm"} -> "lsm";
                    {type, "table"} -> "table";
                    {type, "btree"} -> "table";
                    {type, BadType} ->
                        lager:info("wterl:start ignoring unknown type ~p, using lsm instead", [BadType]),
                        "lsm";
                    _ ->
                        lager:info("wterl:start ignoring mistaken setting defaulting to lsm"),
                        "lsm"
                end,
            {ok, Connection} = establish_connection(Config, Type),
            Table = Type ++ ":" ++ integer_to_list(Partition),
            %% Block compression defaults to snappy; "none"/none disables it.
            Compressor =
                case wterl:config_value(block_compressor, Config, "snappy") of
                    {block_compressor, "snappy"}=C -> [C];
                    {block_compressor, "none"} -> [];
                    {block_compressor, none} -> [];
                    {block_compressor, _} -> [{block_compressor, "snappy"}];
                    _ -> [{block_compressor, "snappy"}]
                end,
            %% LSM tables get additional page/bloom/merge tuning; plain
            %% tables carry only the compressor setting.
            TableOpts =
                case Type of
                    "lsm" ->
                        [{internal_page_max, "128K"},
                         {leaf_page_max, "16K"},
                         {lsm, [
                                {bloom_config, [{leaf_page_max, "8MB"}]},
                                {bloom_bit_count, 28},
                                {bloom_hash_count, 19},
                                {bloom_oldest, true},
                                {chunk_size, "100MB"},
                                {merge_threads, 2}
                               ]}
                        ] ++ Compressor;
                    "table" ->
                        Compressor
                end,
            case wterl:create(Connection, Table, TableOpts) of
                ok ->
                    {ok, #state{table=Table, type=Type,
                                connection=Connection}};
                {error, Reason3} ->
                    {error, Reason3}
            end
    end.
%% @doc Stop the wterl backend
-spec stop(state()) -> ok.
stop(_State) ->
    ok. %% The connection is closed by wterl_conn:stop()

%% @doc Retrieve an object from the wterl backend.
%% Storage keys are sext-encoded {o, Bucket, Key} tuples (to_object_key/2).
-spec get(riak_object:bucket(), riak_object:key(), state()) ->
                 {ok, any(), state()} |
                 {ok, not_found, state()} |
                 {error, term(), state()}.
get(Bucket, Key, #state{connection=Connection, table=Table}=State) ->
    WTKey = to_object_key(Bucket, Key),
    case wterl:get(Connection, Table, WTKey) of
        {ok, Value} ->
            {ok, Value, State};
        not_found ->
            %% NOTE(review): the spec advertises {ok, not_found, state()}
            %% but this clause returns {error, not_found, State}.
            {error, not_found, State};
        {error, Reason} ->
            {error, Reason, State}
    end.

%% @doc Insert an object into the wterl backend.
%% NOTE: The wterl backend does not currently support
%% secondary indexing and the _IndexSpecs parameter
%% is ignored.
-type index_spec() :: {add, Index, SecondaryKey} | {remove, Index, SecondaryKey}.
-spec put(riak_object:bucket(), riak_object:key(), [index_spec()], binary(), state()) ->
                 {ok, state()} |
                 {error, term(), state()}.
put(Bucket, PrimaryKey, _IndexSpecs, Val, #state{connection=Connection, table=Table}=State) ->
    case wterl:put(Connection, Table, to_object_key(Bucket, PrimaryKey), Val) of
        ok ->
            {ok, State};
        {error, Reason} ->
            {error, Reason, State}
    end.

%% @doc Delete an object from the wterl backend
%% NOTE: The wterl backend does not currently support
%% secondary indexing and the _IndexSpecs parameter
%% is ignored.
-spec delete(riak_object:bucket(), riak_object:key(), [index_spec()], state()) ->
                 {ok, state()} |
                 {error, term(), state()}.
delete(Bucket, Key, _IndexSpecs, #state{connection=Connection, table=Table}=State) ->
    case wterl:delete(Connection, Table, to_object_key(Bucket, Key)) of
        ok ->
            {ok, State};
        {error, Reason} ->
            {error, Reason, State}
    end.
%% @doc Fold over all the buckets.  A missing table (enoent) yields the
%% initial accumulator unchanged; {break, Acc} thrown by the fold fun
%% terminates the fold early.
-spec fold_buckets(riak_kv_backend:fold_buckets_fun(),
                   any(),
                   [],
                   state()) -> {ok, any()} | {async, fun()}.
fold_buckets(FoldBucketsFun, Acc, Opts, #state{connection=Connection, table=Table}) ->
    FoldFun = fold_buckets_fun(FoldBucketsFun),
    BucketFolder =
        fun() ->
                case wterl:cursor_open(Connection, Table) of
                    {error, {enoent, _Message}} ->
                        Acc;
                    {ok, Cursor} ->
                        try
                            %% The accumulator pairs the user acc with the
                            %% last bucket seen so duplicates are skipped.
                            {FoldResult, _} =
                                wterl:fold_keys(Cursor, FoldFun, {Acc, []}),
                            FoldResult
                        catch
                            {break, AccFinal} ->
                                AccFinal
                        after
                            ok = wterl:cursor_close(Cursor)
                        end
                end
        end,
    %% Return the folder lazily when async_fold was requested.
    case lists:member(async_fold, Opts) of
        true ->
            {async, BucketFolder};
        false ->
            {ok, BucketFolder()}
    end.
%% @doc Fold over all the keys for one or all buckets.
-spec fold_keys(riak_kv_backend:fold_keys_fun(),
                any(),
                [{atom(), term()}],
                state()) -> {ok, term()} | {async, fun()}.
fold_keys(FoldKeysFun, Acc, Opts, #state{connection=Connection, table=Table}) ->
    %% Figure out how we should limit the fold: by bucket, by
    %% secondary index, or neither (fold across everything.)
    Bucket = lists:keyfind(bucket, 1, Opts),
    Index = lists:keyfind(index, 1, Opts),
    %% Multiple limiters may exist. Take the most specific limiter.
    Limiter =
        if Index /= false -> Index;
           Bucket /= false -> Bucket;
           true -> undefined
        end,
    %% Set up the fold...
    FoldFun = fold_keys_fun(FoldKeysFun, Limiter),
    KeyFolder =
        fun() ->
                case wterl:cursor_open(Connection, Table) of
                    {error, {enoent, _Message}} ->
                        %% The table does not exist yet: nothing to fold.
                        Acc;
                    {ok, Cursor} ->
                        try
                            wterl:fold_keys(Cursor, FoldFun, Acc)
                        catch
                            %% Thrown by the fold fun to stop early.
                            {break, AccFinal} ->
                                AccFinal
                        after
                            ok = wterl:cursor_close(Cursor)
                        end
                end
        end,
    case lists:member(async_fold, Opts) of
        true ->
            {async, KeyFolder};
        false ->
            {ok, KeyFolder()}
    end.
%% @doc Fold over all the objects for one or all buckets.
-spec fold_objects(riak_kv_backend:fold_objects_fun(),
                   any(),
                   [{atom(), term()}],
                   state()) -> {ok, any()} | {async, fun()}.
fold_objects(FoldObjectsFun, Acc, Opts, #state{connection=Connection, table=Table}) ->
    Bucket = proplists:get_value(bucket, Opts),
    FoldFun = fold_objects_fun(FoldObjectsFun, Bucket),
    ObjectFolder =
        fun() ->
                case wterl:cursor_open(Connection, Table) of
                    {error, {enoent, _Message}} ->
                        Acc;
                    {ok, Cursor} ->
                        try
                            wterl:fold(Cursor, FoldFun, Acc)
                        catch
                            {break, AccFinal} ->
                                AccFinal
                        after
                            %% Unlike the other folds, tolerate an eperm
                            %% result from closing the cursor here.
                            case wterl:cursor_close(Cursor) of
                                ok ->
                                    ok;
                                {error, {eperm, _}} -> %% TODO: review/fix
                                    ok;
                                {error, _}=E ->
                                    E
                            end
                        end
                end
        end,
    case lists:member(async_fold, Opts) of
        true ->
            {async, ObjectFolder};
        false ->
            {ok, ObjectFolder()}
    end.
%% @doc Delete all objects from this wterl backend by dropping the
%% partition's table; a busy table (ebusy) is provisionally treated
%% as success.
-spec drop(state()) -> {ok, state()} | {error, term(), state()}.
drop(#state{connection=Connection, table=Table}=State) ->
    case wterl:drop(Connection, Table) of
        ok ->
            {ok, State};
        {error, {ebusy, _}} -> %% TODO: review/fix
            {ok, State};
        Error ->
            {error, Error, State}
    end.
%% @doc Returns true if this wterl backend contains any
%% non-tombstone values; otherwise returns false.
-spec is_empty(state()) -> boolean().
is_empty(#state{connection=Connection, table=Table}) ->
    case wterl:cursor_open(Connection, Table) of
        {ok, Cursor} ->
            %% Empty iff the cursor yields no first record.
            IsEmpty =
                case wterl:cursor_next(Cursor) of
                    not_found ->
                        true;
                    {error, {eperm, _}} ->
                        false; % TODO: review/fix this logic
                    _ ->
                        false
                end,
            wterl:cursor_close(Cursor),
            IsEmpty;
        {error, Reason2} ->
            %% NOTE(review): the spec says boolean() but this branch
            %% returns {error, Reason} when the cursor cannot be opened.
            {error, Reason2}
    end.
%% @doc Get the status information for this wterl backend.
%% Statistics gathering is currently disabled (see the commented-out
%% cursor-based implementation below), so this always returns [].
%% The record fields are underscore-prefixed because they are unused
%% here; binding them without a prefix triggers compiler warnings.
-spec status(state()) -> [{atom(), term()}].
status(#state{connection=_Connection, table=_Table}) ->
    [].
%% case wterl:cursor_open(Connection, "statistics:" ++ Table, [{statistics_fast, true}]) of
%% {ok, Cursor} ->
%% TheStats =
%% case fetch_status(Cursor) of
%% {ok, Stats} ->
%% Stats;
%% {error, {eperm, _}} -> % TODO: review/fix this logic
%% {ok, []};
%% _ ->
%% {ok, []}
%% end,
%% wterl:cursor_close(Cursor),
%% TheStats;
%% {error, Reason2} ->
%% {error, Reason2}
%% end.
%% @doc Register an asynchronous callback.  This backend has no async
%% machinery, so the message is ignored and state returned unchanged.
-spec callback(reference(), any(), state()) -> {ok, state()}.
callback(_Ref, _Msg, State) ->
    {ok, State}.
%% ===================================================================
%% Internal functions
%% ===================================================================
%% @private
%% Size the WiredTiger session pool: one session per ring partition per
%% scheduler, clamped to the range [1024, 8192].  When the ring size
%% cannot be determined, assume 1024 partitions.
max_sessions(Config) ->
    RingSize =
        case app_helper:get_prop_or_env(ring_creation_size, Config, riak_core) of
            undefined -> 1024;
            Size -> Size
        end,
    Estimate = RingSize * erlang:system_info(schedulers),
    %% Clamp: never fewer than 1024 sessions, never more than 8192.
    erlang:min(8192, erlang:max(1024, Estimate)).
%% @private
%% Open (or join) the shared WiredTiger connection rooted at data_root,
%% merging hard-coded defaults with any user-supplied settings from
%% Config (user values win via wterl:config_value/3; extra raw options
%% may ride along under the `wterl' key of Config).
establish_connection(Config, Type) ->
    %% Get the data root directory
    case app_helper:get_prop_or_env(data_root, Config, wterl) of
        undefined ->
            lager:error("Failed to create wterl dir: data_root is not set"),
            {error, data_root_unset};
        DataRoot ->
            ok = filelib:ensure_dir(filename:join(DataRoot, "x")),

            %% WT Connection Options:
            LogSetting = app_helper:get_prop_or_env(log, Config, wterl, false),
            %% For "lsm" tables checkpoints are only enabled when logging
            %% is on (they enable log archival); other types always get
            %% the checkpoint setting (default every 30 seconds).
            CheckpointSetting =
                case Type =:= "lsm" of
                    true ->
                        case LogSetting of
                            true ->
                                %% Turn checkpoints on if logging is on, checkpoints enable log archival.
                                app_helper:get_prop_or_env(checkpoint, Config, wterl, [{wait, 30}]); % in seconds
                            _ ->
                                []
                        end;
                    false ->
                        app_helper:get_prop_or_env(checkpoint, Config, wterl, [{wait, 30}])
                end,
            RequestedCacheSize = app_helper:get_prop_or_env(cache_size, Config, wterl),
            ConnectionOpts =
                orddict:from_list(
                  [ wterl:config_value(create, Config, true),
                    wterl:config_value(checkpoint_sync, Config, false),
                    wterl:config_value(transaction_sync, Config, "none"),
                    wterl:config_value(log, Config, [{enabled, LogSetting}]),
                    wterl:config_value(mmap, Config, false),
                    wterl:config_value(checkpoint, Config, CheckpointSetting),
                    wterl:config_value(session_max, Config, max_sessions(Config)),
                    wterl:config_value(cache_size, Config, size_cache(RequestedCacheSize)),
                    wterl:config_value(statistics, Config, [ "fast", "clear"]),
                    wterl:config_value(statistics_log, Config, [{wait, 600}]), % in seconds
                    wterl:config_value(verbose, Config, [ "salvage", "verify"
                                                          % Note: for some unknown reason, if you add these additional
                                                          % verbose flags Erlang SEGV's "size_object: bad tag for 0x80"
                                                          % no idea why... you've been warned.
                                                          %"block", "shared_cache", "reconcile", "evict", "lsm",
                                                          %"fileops", "read", "write", "readserver", "evictserver",
                                                          %"hazard", "mutex", "ckpt"
                                                        ]) ] ++ proplists:get_value(wterl, Config, [])), % sec
            %% WT Session Options:
            SessionOpts = [{isolation, "snapshot"}],
            case wterl_conn:open(DataRoot, ConnectionOpts, SessionOpts) of
                {ok, Connection} ->
                    {ok, Connection};
                {error, Reason2} ->
                    lager:error("Failed to establish a WiredTiger connection, wterl backend unable to start: ~p\n", [Reason2]),
                    {error, Reason2}
            end
    end.
%% @private
%% Return a function to fold over the buckets on this backend.
%% The accumulator is {UserAcc, LastBucket}: the user fun fires only
%% when the decoded bucket differs from the previous one.  NOTE(review):
%% this assumes keys with equal buckets arrive adjacently (sorted
%% order); otherwise a bucket could be emitted more than once.  A key
%% that is not an object key aborts the fold via throw.
fold_buckets_fun(FoldBucketsFun) ->
    fun(BK, {Acc, LastBucket}) ->
            case from_object_key(BK) of
                {LastBucket, _} ->
                    %% Same bucket as the previous key: skip it.
                    {Acc, LastBucket};
                {Bucket, _} ->
                    {FoldBucketsFun(Bucket, Acc), Bucket};
                _ ->
                    throw({break, Acc})
            end
    end.
%% @private
%% Return a function to fold over keys on this backend, specialized on
%% the limiter chosen in fold_keys/4.  Every returned fun throws
%% {break, Acc} on the first key outside its scope to stop the fold.
fold_keys_fun(FoldKeysFun, undefined) ->
    %% Fold across everything...
    fun(StorageKey, Acc) ->
            case from_object_key(StorageKey) of
                {Bucket, Key} ->
                    FoldKeysFun(Bucket, Key, Acc);
                _ ->
                    throw({break, Acc})
            end
    end;
fold_keys_fun(FoldKeysFun, {bucket, FilterBucket}) ->
    %% Fold across a specific bucket...
    fun(StorageKey, Acc) ->
            case from_object_key(StorageKey) of
                {Bucket, Key} when Bucket == FilterBucket ->
                    FoldKeysFun(Bucket, Key, Acc);
                _ ->
                    throw({break, Acc})
            end
    end;
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, <<"$bucket">>, _}}) ->
    %% 2I exact match query on special $bucket field...
    fold_keys_fun(FoldKeysFun, {bucket, FilterBucket});
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {eq, FilterField, FilterTerm}}) ->
    %% Rewrite 2I exact match query as a range...
    NewQuery = {range, FilterField, FilterTerm, FilterTerm},
    fold_keys_fun(FoldKeysFun, {index, FilterBucket, NewQuery});
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, <<"$key">>, StartKey, EndKey}}) ->
    %% 2I range query on special $key field...
    fun(StorageKey, Acc) ->
            case from_object_key(StorageKey) of
                {Bucket, Key} when FilterBucket == Bucket,
                                   StartKey =< Key,
                                   EndKey >= Key ->
                    FoldKeysFun(Bucket, Key, Acc);
                _ ->
                    throw({break, Acc})
            end
    end;
fold_keys_fun(FoldKeysFun, {index, FilterBucket, {range, FilterField, StartTerm, EndTerm}}) ->
    %% 2I range query...
    fun(StorageKey, Acc) ->
            case from_index_key(StorageKey) of
                {Bucket, Key, Field, Term} when FilterBucket == Bucket,
                                                FilterField == Field,
                                                StartTerm =< Term,
                                                EndTerm >= Term ->
                    FoldKeysFun(Bucket, Key, Acc);
                _ ->
                    throw({break, Acc})
            end
    end;
fold_keys_fun(_FoldKeysFun, Other) ->
    %% Unknown limiter shape: fail loudly.
    throw({unknown_limiter, Other}).
%% @private
%% Return a function to fold over the objects on this backend.
%% 2I does not support fold objects at this time, so this is much
%% simpler than fold_keys_fun: every bucket matches when FilterBucket
%% is undefined, otherwise only the given one; the first non-matching
%% key aborts the fold via throw.
fold_objects_fun(FoldObjectsFun, FilterBucket) ->
    fun({StorageKey, Value}, Acc) ->
            case from_object_key(StorageKey) of
                {Bucket, Key} when FilterBucket == undefined;
                                   Bucket == FilterBucket ->
                    FoldObjectsFun(Bucket, Key, Value, Acc);
                _ ->
                    throw({break, Acc})
            end
    end.
%% @private
%% Encode a bucket/key pair as a sext-encoded {o, Bucket, Key} tuple.
to_object_key(Bucket, Key) ->
    sext:encode({o, Bucket, Key}).

%% @private
%% Decode a storage key; returns {Bucket, Key} for object keys and
%% undefined for anything else (e.g. index entries).
from_object_key(LKey) ->
    case sext:decode(LKey) of
        {o, Bucket, Key} ->
            {Bucket, Key};
        _ ->
            undefined
    end.

%% @private
%% Decode a secondary-index storage key {i, Bucket, Field, Term, Key};
%% returns {Bucket, Key, Field, Term} or undefined.
from_index_key(LKey) ->
    case sext:decode(LKey) of
        {i, Bucket, Field, Term, Key} ->
            {Bucket, Key, Field, Term};
        _ ->
            undefined
    end.
%% @private
%% Return all status from wterl statistics cursor
%% fetch_status(Cursor) ->
%% {ok, fetch_status(Cursor, wterl:cursor_next_value(Cursor), [])}.
%% fetch_status(_Cursor, {error, _}, Acc) ->
%% lists:reverse(Acc);
%% fetch_status(_Cursor, not_found, Acc) ->
%% lists:reverse(Acc);
%% fetch_status(Cursor, {ok, Stat}, Acc) ->
%% [What,Val|_] = [binary_to_list(B) || B <- binary:split(Stat, [<<0>>], [global])],
%% fetch_status(Cursor, wterl:cursor_next_value(Cursor), [{What,Val}|Acc]).
%% @private
%% Resolve the connection cache size.  An explicit request (string or
%% number) is used as-is; otherwise estimate from system memory (needs
%% sasl + os_mon running, for memsup) and remember the guess in the
%% application environment so later callers skip the estimation.
size_cache(RequestedSize) ->
    Size =
        case RequestedSize of
            undefined ->
                RunningApps = application:which_applications(),
                FinalGuess =
                    case proplists:is_defined(sasl, RunningApps) andalso
                        proplists:is_defined(os_mon, RunningApps) of
                        true ->
                            Memory = memsup:get_system_memory_data(),
                            TotalRAM = proplists:get_value(system_total_memory, Memory),
                            FreeRAM = proplists:get_value(free_memory, Memory),
                            UsedByBeam = proplists:get_value(total, erlang:memory()),
                            %% Target one third of the RAM not used by this
                            %% VM, rounded down to a whole MB.
                            Target = ((TotalRAM - UsedByBeam) div 3),
                            FirstGuess = (Target - (Target rem (1024 * 1024))),
                            %% Never ask for more than is currently free.
                            SecondGuess =
                                case FirstGuess > FreeRAM of
                                    true -> FreeRAM - (FreeRAM rem (1024 * 1024));
                                    _ -> FirstGuess
                                end,
                            case SecondGuess < 1073741824 of %% < 1GB?
                                true -> "1GB";
                                false ->
                                    ThirdGuess = SecondGuess div (1024 * 1024),
                                    integer_to_list(ThirdGuess) ++ "MB"
                            end;
                        false ->
                            "1GB"
                    end,
                %% Cache the guess so subsequent backends reuse it.
                application:set_env(wterl, cache_size, FinalGuess),
                FinalGuess;
            Value when is_list(Value) ->
                Value;
            Value when is_number(Value) ->
                %% NOTE(review): integer_to_list/1 fails on floats even
                %% though is_number/1 admits them.
                integer_to_list(Value)
        end,
    Size.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

%% Generic backend suite with data_root taken from the application
%% environment; the on-disk directory is removed first for a clean run.
simple_test_() ->
    {ok, CWD} = file:get_cwd(),
    rmdir:path(filename:join([CWD, "test/wterl-backend"])), %?assertCmd("rm -rf test/wterl-backend"),
    application:set_env(wterl, data_root, "test/wterl-backend"),
    temp_riak_kv_backend:standard_test(?MODULE, []).

%% Same suite, with data_root passed in the backend config proplist.
custom_config_test_() ->
    {ok, CWD} = file:get_cwd(),
    rmdir:path(filename:join([CWD, "test/wterl-backend"])), %?assertCmd("rm -rf test/wterl-backend"),
    application:set_env(wterl, data_root, ""),
    temp_riak_kv_backend:standard_test(?MODULE, [{data_root, "test/wterl-backend"}]).

-endif.

View file

@ -1,26 +0,0 @@
-module(rmdir).
-export([path/1]).
-include_lib("kernel/include/file.hrl").
%% Recursively delete Dir and everything beneath it.  Errors from
%% read_file_info (e.g. the path does not exist) are silently ignored.
%% NOTE(review): the top-level call joins Dir onto "." -- confirm this
%% behaves as intended when Dir is an absolute path.
path(Dir) ->
    remove_all_files(".", [Dir]).

%% Delete each of Files relative to Dir; directories are emptied
%% recursively before being removed with del_dir.
remove_all_files(Dir, Files) ->
    lists:foreach(fun(File) ->
                          FilePath = filename:join([Dir, File]),
                          case file:read_file_info(FilePath) of
                              {ok, FileInfo} ->
                                  case FileInfo#file_info.type of
                                      directory ->
                                          {ok, DirFiles} = file:list_dir(FilePath),
                                          remove_all_files(FilePath, DirFiles),
                                          file:del_dir(FilePath);
                                      _ ->
                                          file:delete(FilePath)
                                  end;
                              {error, _Reason} ->
                                  ok
                          end
                  end, Files).

View file

@ -2,7 +2,7 @@
%%
%% riak_kv_backend: Riak backend behaviour
%%
%% Copyright (c) 2007-2010 Basho Technologies, Inc. All Rights Reserved.
%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@ -23,8 +23,8 @@
%%% NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
%%%
%%% This is a temporary copy of riak_kv_backend, just here to keep
%%% wterl development private for now. When riak_kv_wterl_backend is
%%% moved to riak_kv, delete this file.
%%% WiredTiger development private for now. When riak_kv_wiredtiger_backend
%%% is moved to riak_kv, delete this file.
%%%
%%% NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
@ -272,16 +272,10 @@ empty_check({Backend, State}) ->
}.
setup({BackendMod, Config}) ->
application:start(lager),
application:start(sasl),
application:start(os_mon),
{ok, S} = BackendMod:start(42, Config),
{BackendMod, S}.
cleanup({BackendMod, S}) ->
ok = BackendMod:stop(S),
application:stop(lager),
application:stop(sasl),
application:stop(os_mon).
ok = BackendMod:stop(S).
-endif. % TEST

View file

@ -1,13 +1,12 @@
{application, wterl,
{application, wt,
[
{description, "Erlang NIF Wrapper for WiredTiger"},
{vsn, "0.9.0"},
{vsn, "1.0.0"},
{registered, []},
{applications, [
kernel,
stdlib
]},
{mod, {wterl_app, []}},
{env, [
]}
]}.
{mod, {wt_app, []}},
{env, []}
]}.

664
src/wt.erl Normal file
View file

@ -0,0 +1,664 @@
%% -------------------------------------------------------------------
%%
%% wt: Erlang Wrapper for WiredTiger
%%
%% Copyright (c) 2012-2013 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wt).
-export([conn_open/2,
conn_close/1,
cursor_close/1,
cursor_insert/3,
cursor_next/1,
cursor_next_key/1,
cursor_next_value/1,
cursor_open/2,
cursor_prev/1,
cursor_prev_key/1,
cursor_prev_value/1,
cursor_remove/3,
cursor_reset/1,
cursor_search/2,
cursor_search_near/2,
cursor_update/3,
session_checkpoint/1,
session_checkpoint/2,
session_close/1,
session_create/2,
session_create/3,
session_delete/3,
session_drop/2,
session_drop/3,
session_get/3,
session_open/1,
session_open/2,
session_put/4,
session_rename/3,
session_rename/4,
session_salvage/2,
session_salvage/3,
session_truncate/2,
session_truncate/3,
session_upgrade/2,
session_upgrade/3,
session_verify/2,
session_verify/3,
config_value/3,
config_to_bin/1,
fold_keys/3,
fold/3]).
-ifdef(TEST).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-define(QC_OUT(P),
eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)).
-endif.
-include_lib("eunit/include/eunit.hrl").
-endif.
-type config() :: binary().
-type config_list() :: [{atom(), any()}].
-opaque connection() :: reference().
-opaque session() :: reference().
-opaque cursor() :: reference().
-type key() :: binary().
-type value() :: binary().
-export_type([connection/0, session/0, cursor/0]).
-on_load(init/0).
-define(nif_stub, nif_stub_error(?LINE)).

%% Raised when a NIF entry point is called without the shared library
%% having been loaded; carries the source line for debugging.
nif_stub_error(Line) ->
    erlang:nif_error({nif_not_loaded,module,?MODULE,line,Line}).

%% An empty WiredTiger config string (NUL-terminated, C-style).
-define(EMPTY_CONFIG, <<"\0">>).

%% @private
%% Load the wt NIF from priv/, falling back to a path derived from the
%% ebin directory when code:priv_dir/1 cannot resolve the application.
-spec init() -> ok | {error, any()}.
init() ->
    PrivDir = case code:priv_dir(?MODULE) of
                  {error, bad_name} ->
                      EbinDir = filename:dirname(code:which(?MODULE)),
                      AppPath = filename:dirname(EbinDir),
                      filename:join(AppPath, "priv");
                  Path ->
                      Path
              end,
    erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
%% Open a WiredTiger connection rooted at HomeDir (NIF-implemented).
-spec conn_open(string(), config()) -> {ok, connection()} | {error, term()}.
conn_open(_HomeDir, _Config) ->
    ?nif_stub.

%% Close a connection and release its resources (NIF-implemented).
-spec conn_close(connection()) -> ok | {error, term()}.
conn_close(_ConnRef) ->
    ?nif_stub.
%% --- Session operations -------------------------------------------------
%% Each op/N wrapper below supplies ?EMPTY_CONFIG; the op/N+1 variant is
%% the NIF entry point.  These presumably map to the corresponding
%% WT_SESSION methods of the WiredTiger C API -- see its reference docs.

-spec session_open(connection()) -> {ok, session()} | {error, term()}.
session_open(ConnRef) ->
    session_open(ConnRef, ?EMPTY_CONFIG).

-spec session_open(connection(), config()) -> {ok, session()} | {error, term()}.
session_open(_ConnRef, _Config) ->
    ?nif_stub.

-spec session_close(session()) -> ok | {error, term()}.
session_close(_Ref) ->
    ?nif_stub.

%% Create the object (e.g. "table:name") identified by Name.
-spec session_create(session(), string()) -> ok | {error, term()}.
-spec session_create(session(), string(), config()) -> ok | {error, term()}.
session_create(Ref, Name) ->
    session_create(Ref, Name, ?EMPTY_CONFIG).
session_create(_Ref, _Name, _Config) ->
    ?nif_stub.

%% Drop (delete) the named object.
-spec session_drop(session(), string()) -> ok | {error, term()}.
-spec session_drop(session(), string(), config()) -> ok | {error, term()}.
session_drop(Ref, Name) ->
    session_drop(Ref, Name, ?EMPTY_CONFIG).
session_drop(_Ref, _Name, _Config) ->
    ?nif_stub.

%% Key/value operations against a named table.
-spec session_delete(session(), string(), key()) -> ok | {error, term()}.
session_delete(_Ref, _Table, _Key) ->
    ?nif_stub.

-spec session_get(session(), string(), key()) -> {ok, value()} | not_found | {error, term()}.
session_get(_Ref, _Table, _Key) ->
    ?nif_stub.

-spec session_put(session(), string(), key(), value()) -> ok | {error, term()}.
session_put(_Ref, _Table, _Key, _Value) ->
    ?nif_stub.

%% Rename the object OldName to NewName.
-spec session_rename(session(), string(), string()) -> ok | {error, term()}.
-spec session_rename(session(), string(), string(), config()) -> ok | {error, term()}.
session_rename(Ref, OldName, NewName) ->
    session_rename(Ref, OldName, NewName, ?EMPTY_CONFIG).
session_rename(_Ref, _OldName, _NewName, _Config) ->
    ?nif_stub.

%% Salvage (repair) the named object.
-spec session_salvage(session(), string()) -> ok | {error, term()}.
-spec session_salvage(session(), string(), config()) -> ok | {error, term()}.
session_salvage(Ref, Name) ->
    session_salvage(Ref, Name, ?EMPTY_CONFIG).
session_salvage(_Ref, _Name, _Config) ->
    ?nif_stub.

%% Write a checkpoint.
%% NOTE(review): the /1 clause binds and *uses* the underscored _Ref,
%% which draws a compiler warning; it should be a plain variable.
-spec session_checkpoint(session()) -> ok | {error, term()}.
-spec session_checkpoint(session(), config()) -> ok | {error, term()}.
session_checkpoint(_Ref) ->
    session_checkpoint(_Ref, ?EMPTY_CONFIG).
session_checkpoint(_Ref, _Config) ->
    ?nif_stub.

%% Remove all records from the named object.
-spec session_truncate(session(), string()) -> ok | {error, term()}.
-spec session_truncate(session(), string(), config()) -> ok | {error, term()}.
session_truncate(Ref, Name) ->
    session_truncate(Ref, Name, ?EMPTY_CONFIG).
session_truncate(_Ref, _Name, _Config) ->
    ?nif_stub.

%% Upgrade the named object's on-disk format.
-spec session_upgrade(session(), string()) -> ok | {error, term()}.
-spec session_upgrade(session(), string(), config()) -> ok | {error, term()}.
session_upgrade(Ref, Name) ->
    session_upgrade(Ref, Name, ?EMPTY_CONFIG).
session_upgrade(_Ref, _Name, _Config) ->
    ?nif_stub.

%% Verify the integrity of the named object.
-spec session_verify(session(), string()) -> ok | {error, term()}.
-spec session_verify(session(), string(), config()) -> ok | {error, term()}.
session_verify(Ref, Name) ->
    session_verify(Ref, Name, ?EMPTY_CONFIG).
session_verify(_Ref, _Name, _Config) ->
    ?nif_stub.
%% --- Cursor operations --------------------------------------------------
%% All cursor entry points are NIF-implemented.

-spec cursor_open(session(), string()) -> {ok, cursor()} | {error, term()}.
cursor_open(_Ref, _Table) ->
    ?nif_stub.

-spec cursor_close(cursor()) -> ok | {error, term()}.
cursor_close(_Cursor) ->
    ?nif_stub.

%% Step the cursor forward, returning the key, the value, or both.
-spec cursor_next(cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_next(_Cursor) ->
    ?nif_stub.

-spec cursor_next_key(cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_next_key(_Cursor) ->
    ?nif_stub.

-spec cursor_next_value(cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_next_value(_Cursor) ->
    ?nif_stub.

%% Step the cursor backwards.
-spec cursor_prev(cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_prev(_Cursor) ->
    ?nif_stub.

-spec cursor_prev_key(cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_prev_key(_Cursor) ->
    ?nif_stub.

-spec cursor_prev_value(cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_prev_value(_Cursor) ->
    ?nif_stub.

%% Position the cursor at (or near) the given key.
-spec cursor_search(cursor(), key()) -> {ok, value()} | {error, term()}.
cursor_search(_Cursor, _Key) ->
    ?nif_stub.

-spec cursor_search_near(cursor(), key()) -> {ok, value()} | {error, term()}.
cursor_search_near(_Cursor, _Key) ->
    ?nif_stub.

%% Return the cursor to its initial, unpositioned state.
-spec cursor_reset(cursor()) -> ok | {error, term()}.
cursor_reset(_Cursor) ->
    ?nif_stub.

%% Mutating operations through the cursor.
-spec cursor_insert(cursor(), key(), value()) -> ok | {error, term()}.
cursor_insert(_Cursor, _Key, _Value) ->
    ?nif_stub.

-spec cursor_update(cursor(), key(), value()) -> ok | {error, term()}.
cursor_update(_Cursor, _Key, _Value) ->
    ?nif_stub.

-spec cursor_remove(cursor(), key(), value()) -> ok | {error, term()}.
cursor_remove(_Cursor, _Key, _Value) ->
    ?nif_stub.
-type fold_keys_fun() :: fun((Key::binary(), any()) -> any()).

%% Left-fold Fun over every key reachable from the cursor's current
%% position, stopping when the cursor reports not_found.
-spec fold_keys(cursor(), fold_keys_fun(), any()) -> any().
fold_keys(Cursor, Fun, Acc0) ->
    fold_keys(Cursor, Fun, Acc0, cursor_next_key(Cursor)).

%% Internal driver clauses; any error tuple from the cursor raises
%% function_clause (as in the value-folding variant below).
fold_keys(C, F, Acc, {ok, Key}) ->
    fold_keys(C, F, F(Key, Acc), cursor_next_key(C));
fold_keys(_C, _F, Acc, not_found) ->
    Acc.

-type fold_fun() :: fun(({Key::binary(), Value::binary()}, any()) -> any()).

%% Left-fold Fun over every {Key, Value} pair reachable from the
%% cursor's current position, stopping at not_found.
-spec fold(cursor(), fold_fun(), any()) -> any().
fold(Cursor, Fun, Acc0) ->
    fold(Cursor, Fun, Acc0, cursor_next(Cursor)).

fold(C, F, Acc, {ok, Key, Value}) ->
    fold(C, F, F({Key, Value}, Acc), cursor_next(C));
fold(_C, _F, Acc, not_found) ->
    Acc.
%%
%% Configuration type information.
%%

%% @private
%% Map each recognized configuration key to the encoder used by
%% config_encode/2 when serializing it into a WiredTiger config string.
%% Keys absent from this list are rejected by config_to_bin/2.
config_types() ->
    [{block_compressor, string},
     {cache_size, string},
     {chunk, string},
     {create, bool},
     {direct_io, map},
     {drop, list},
     {error_prefix, string},
     {eviction_target, integer},
     {eviction_trigger, integer},
     {extensions, string},
     {force, bool},
     {hazard_max, integer},
     {home_environment, bool},
     {home_environment_priv, bool},
     {internal_page_max, string},
     {isolation, string},
     {key_type, string},
     {leaf_page_max, string},
     {logging, bool},
     {lsm_bloom_config, config},
     {lsm_chunk_size, string},
     {min, string},
     {multiprocess, bool},
     {name, string},
     {session_max, integer},
     {shared_cache, config},
     {size, string},
     {sync, bool},
     {target, list},
     {transactional, bool},
     {verbose, map}].
%% Look up Key in the Config proplist, falling back to the `wt'
%% application environment and then Default; returns a {Key, Value}
%% pair ready for inclusion in a config_list().
config_value(Key, Config, Default) ->
    {Key, app_helper:get_prop_or_env(Key, Config, wt, Default)}.
%% @private
%% Serialize a single configuration value according to its declared
%% type; returns the atom `invalid' for values that cannot be encoded.
config_encode(integer, V) ->
    try
        list_to_binary(integer_to_list(V))
    catch
        _:_ -> invalid
    end;
config_encode(config, V) ->
    %% Nested config group: "(k=v,...)".
    list_to_binary(["(", config_to_bin(V, []), ")"]);
config_encode(list, V) ->
    list_to_binary(["(", string:join(V, ","), ")"]);
config_encode(map, V) ->
    list_to_binary(["[", string:join(V, ","), "]"]);
config_encode(string, V) ->
    list_to_binary(V);
config_encode(bool, B) when B =:= true; B =:= false ->
    %% atom_to_binary(true) =:= <<"true">>, likewise for false.
    atom_to_binary(B, utf8);
config_encode(_Type, _V) ->
    invalid.
%% Serialize a config_list() into a NUL-terminated WiredTiger
%% configuration binary of the form <<"key=value,...\0">>.
-spec config_to_bin(config_list()) -> config().
config_to_bin(Opts) ->
    iolist_to_binary([config_to_bin(Opts, []), <<"\0">>]).

%% Accumulate "key=value," fragments.  Unknown keys and values that
%% fail to encode are logged and skipped.  Fragments are prepended to
%% the accumulator, so options appear in reverse input order.
config_to_bin([], Acc) ->
    iolist_to_binary(Acc);
config_to_bin([ [] | Rest], Acc) ->
    %% Empty placeholder entries (e.g. a disabled option) are dropped.
    config_to_bin(Rest, Acc);
config_to_bin([{Key, Value} | Rest], Acc) ->
    case lists:keysearch(Key, 1, config_types()) of
        {value, {Key, Type}} ->
            Acc2 = case config_encode(Type, Value) of
                       invalid ->
                           error_logger:error_msg("Skipping invalid option ~p = ~p\n",
                                                  [Key, Value]),
                           Acc;
                       EncodedValue ->
                           EncodedKey = atom_to_binary(Key, utf8),
                           [EncodedKey, <<"=">>, EncodedValue, <<",">> | Acc]
                   end,
            config_to_bin(Rest, Acc2);
        false ->
            error_logger:error_msg("Skipping unknown option ~p = ~p\n", [Key, Value]),
            config_to_bin(Rest, Acc)
    end.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
-define(TEST_DATA_DIR, "test/wt.basic").

%% Start from a clean data directory and open a fresh connection with
%% create enabled and a 1GB cache.
open_test_conn(DataDir) ->
    ?assertCmd("rm -rf "++DataDir),
    ?assertMatch(ok, filelib:ensure_dir(filename:join(DataDir, "x"))),
    OpenConfig = config_to_bin([{create,true},{cache_size,"1GB"}]),
    {ok, ConnRef} = conn_open(DataDir, OpenConfig),
    ConnRef.

%% Open a session and (re)create an empty "table:test".
open_test_session(ConnRef) ->
    {ok, SRef} = session_open(ConnRef),
    ?assertMatch(ok, session_drop(SRef, "table:test", config_to_bin([{force,true}]))),
    ?assertMatch(ok, session_create(SRef, "table:test")),
    SRef.
%% Smoke test: a connection can be opened and closed.
conn_test() ->
    ConnRef = open_test_conn(?TEST_DATA_DIR),
    ?assertMatch(ok, conn_close(ConnRef)).

%% Session lifecycle against one shared connection: open/close, and
%% create/drop of a table.
session_test_() ->
    {setup,
     fun() ->
             open_test_conn(?TEST_DATA_DIR)
     end,
     fun(ConnRef) ->
             ok = conn_close(ConnRef)
     end,
     fun(ConnRef) ->
             {inorder,
              [{"open/close a session",
                fun() ->
                        {ok, SRef} = session_open(ConnRef),
                        ?assertMatch(ok, session_close(SRef))
                end},
               {"create and drop a table",
                fun() ->
                        SRef = open_test_session(ConnRef),
                        ?assertMatch(ok, session_drop(SRef, "table:test")),
                        ?assertMatch(ok, session_close(SRef))
                end}]}
     end}.

%% Round-trip put/get/delete of a single key.
insert_delete_test() ->
    ConnRef = open_test_conn(?TEST_DATA_DIR),
    SRef = open_test_session(ConnRef),
    ?assertMatch(ok, session_put(SRef, "table:test", <<"a">>, <<"apple">>)),
    ?assertMatch({ok, <<"apple">>}, session_get(SRef, "table:test", <<"a">>)),
    ?assertMatch(ok, session_delete(SRef, "table:test", <<"a">>)),
    ?assertMatch(not_found, session_get(SRef, "table:test", <<"a">>)),
    ok = session_close(SRef),
    ok = conn_close(ConnRef).

%% Fixture: a table pre-populated with five known key/value pairs.
init_test_table() ->
    ConnRef = open_test_conn(?TEST_DATA_DIR),
    SRef = open_test_session(ConnRef),
    ?assertMatch(ok, session_put(SRef, "table:test", <<"a">>, <<"apple">>)),
    ?assertMatch(ok, session_put(SRef, "table:test", <<"b">>, <<"banana">>)),
    ?assertMatch(ok, session_put(SRef, "table:test", <<"c">>, <<"cherry">>)),
    ?assertMatch(ok, session_put(SRef, "table:test", <<"d">>, <<"date">>)),
    ?assertMatch(ok, session_put(SRef, "table:test", <<"g">>, <<"gooseberry">>)),
    {ConnRef, SRef}.

%% Fixture teardown: close the session, then the connection.
stop_test_table({ConnRef, SRef}) ->
    ?assertMatch(ok, session_close(SRef)),
    ?assertMatch(ok, conn_close(ConnRef)).
%% Exercise verify/checkpoint/salvage/upgrade/rename/truncate on the
%% populated fixture table, checking the data survives each maintenance
%% operation (truncate, run last, empties the table).
various_session_test_() ->
    {setup,
     fun init_test_table/0,
     fun stop_test_table/1,
     fun({_, SRef}) ->
             {inorder,
              [{"session verify",
                fun() ->
                        ?assertMatch(ok, session_verify(SRef, "table:test")),
                        ?assertMatch({ok, <<"apple">>},
                                     session_get(SRef, "table:test", <<"a">>))
                end},
               {"session checkpoint",
                fun() ->
                        %% Checkpoint only the test table.
                        Cfg = wt:config_to_bin([{target, ["\"table:test\""]}]),
                        ?assertMatch(ok, session_checkpoint(SRef, Cfg)),
                        ?assertMatch({ok, <<"apple">>},
                                     session_get(SRef, "table:test", <<"a">>))
                end},
               {"session salvage",
                fun() ->
                        ok = session_salvage(SRef, "table:test"),
                        {ok, <<"apple">>} = session_get(SRef, "table:test", <<"a">>)
                end},
               {"session upgrade",
                fun() ->
                        ?assertMatch(ok, session_upgrade(SRef, "table:test")),
                        ?assertMatch({ok, <<"apple">>},
                                     session_get(SRef, "table:test", <<"a">>))
                end},
               {"session rename",
                fun() ->
                        %% Rename away and back; data must follow.
                        ?assertMatch(ok,
                                     session_rename(SRef, "table:test", "table:new")),
                        ?assertMatch({ok, <<"apple">>},
                                     session_get(SRef, "table:new", <<"a">>)),
                        ?assertMatch(ok,
                                     session_rename(SRef, "table:new", "table:test")),
                        ?assertMatch({ok, <<"apple">>},
                                     session_get(SRef, "table:test", <<"a">>))
                end},
               {"session truncate",
                fun() ->
                        ?assertMatch(ok, session_truncate(SRef, "table:test")),
                        ?assertMatch(not_found, session_get(SRef, "table:test", <<"a">>))
                end}]}
     end}.
%% Open and close two cursors in sequence on the seeded table: the first
%% steps forward to the smallest key, the second steps backward to the
%% largest key.
cursor_open_close_test() ->
    {ConnRef, SRef} = init_test_table(),
    {ok, Forward} = cursor_open(SRef, "table:test"),
    ?assertMatch({ok, <<"a">>, <<"apple">>}, cursor_next(Forward)),
    ?assertMatch(ok, cursor_close(Forward)),
    {ok, Backward} = cursor_open(SRef, "table:test"),
    ?assertMatch({ok, <<"g">>, <<"gooseberry">>}, cursor_prev(Backward)),
    ?assertMatch(ok, cursor_close(Backward)),
    stop_test_table({ConnRef, SRef}).
%% Fixture exercising cursor traversal, search, and mutation against the
%% table seeded by init_test_table/0 (keys a,b,c,d,g -> fruit names).
%% Runs {inorder}: the last three subtests mutate the table (insert
%% <<"h">>, overwrite then update <<"g">>, finally remove <<"g">>), so
%% each later subtest sees the state left behind by the earlier ones.
various_cursor_test_() ->
    {setup,
     fun init_test_table/0,
     fun stop_test_table/1,
     fun({_, SRef}) ->
             {inorder,
              [{"move a cursor back and forth, getting key",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch({ok, <<"a">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"b">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"c">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"d">>}, cursor_next_key(Cursor)),
                        %% Step back one, then forward past the end.
                        ?assertMatch({ok, <<"c">>}, cursor_prev_key(Cursor)),
                        ?assertMatch({ok, <<"d">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"g">>}, cursor_next_key(Cursor)),
                        %% Walking past the last key yields not_found.
                        ?assertMatch(not_found, cursor_next_key(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"move a cursor back and forth, getting value",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch({ok, <<"apple">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"banana">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"cherry">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"date">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"cherry">>}, cursor_prev_value(Cursor)),
                        ?assertMatch({ok, <<"date">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"gooseberry">>}, cursor_next_value(Cursor)),
                        ?assertMatch(not_found, cursor_next_value(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"move a cursor back and forth, getting key and value",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch({ok, <<"a">>, <<"apple">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"b">>, <<"banana">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"c">>, <<"cherry">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"d">>, <<"date">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"c">>, <<"cherry">>}, cursor_prev(Cursor)),
                        ?assertMatch({ok, <<"d">>, <<"date">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"g">>, <<"gooseberry">>}, cursor_next(Cursor)),
                        ?assertMatch(not_found, cursor_next(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"fold keys",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        %% The fold prepends each key to the accumulator,
                        %% so the result is in reverse key order.
                        ?assertMatch([<<"g">>, <<"d">>, <<"c">>, <<"b">>, <<"a">>],
                                     fold_keys(Cursor, fun(Key, Acc) -> [Key | Acc] end, [])),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"search for an item",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch({ok, <<"banana">>}, cursor_search(Cursor, <<"b">>)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"range search for an item",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        %% <<"z">> is past every key; search_near lands on
                        %% the nearest existing record (<<"g">>).
                        ?assertMatch({ok, <<"gooseberry">>},
                                     cursor_search_near(Cursor, <<"z">>)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"check cursor reset",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch({ok, <<"apple">>}, cursor_next_value(Cursor)),
                        %% Reset repositions the cursor to the start, so
                        %% the next step returns the first value again.
                        ?assertMatch(ok, cursor_reset(Cursor)),
                        ?assertMatch({ok, <<"apple">>}, cursor_next_value(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"insert/overwrite an item using a cursor",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        %% Insert a brand-new key...
                        ?assertMatch(ok,
                                     cursor_insert(Cursor, <<"h">>, <<"huckleberry">>)),
                        ?assertMatch({ok, <<"huckleberry">>},
                                     cursor_search(Cursor, <<"h">>)),
                        %% ...and overwrite an existing one.
                        ?assertMatch(ok,
                                     cursor_insert(Cursor, <<"g">>, <<"grapefruit">>)),
                        ?assertMatch({ok, <<"grapefruit">>},
                                     cursor_search(Cursor, <<"g">>)),
                        ?assertMatch(ok, cursor_close(Cursor)),
                        %% Mutations are visible through the session API too.
                        ?assertMatch({ok, <<"grapefruit">>},
                                     session_get(SRef, "table:test", <<"g">>)),
                        ?assertMatch({ok, <<"huckleberry">>},
                                     session_get(SRef, "table:test", <<"h">>))
                end},
               {"update an item using a cursor",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch(ok,
                                     cursor_update(Cursor, <<"g">>, <<"goji berries">>)),
                        %% Unlike insert, update of a missing key fails.
                        ?assertMatch(not_found,
                                     cursor_update(Cursor, <<"k">>, <<"kumquat">>)),
                        ?assertMatch(ok, cursor_close(Cursor)),
                        ?assertMatch({ok, <<"goji berries">>},
                                     session_get(SRef, "table:test", <<"g">>))
                end},
               {"remove an item using a cursor",
                fun() ->
                        {ok, Cursor} = cursor_open(SRef, "table:test"),
                        ?assertMatch(ok,
                                     cursor_remove(Cursor, <<"g">>, <<"goji berries">>)),
                        %% Removing an absent key reports not_found.
                        ?assertMatch(not_found,
                                     cursor_remove(Cursor, <<"l">>, <<"lemon">>)),
                        ?assertMatch(ok, cursor_close(Cursor)),
                        ?assertMatch(not_found,
                                     session_get(SRef, "table:test", <<"g">>))
                end}]}
     end}.
-ifdef(EQC).
%% Run property P under QuickCheck and assert that it holds; ?QC_OUT
%% redirects QuickCheck's output to the user console.
qc(P) ->
    ?assert(eqc:quickcheck(?QC_OUT(P))).
%% Generator: a non-empty list of non-empty binary keys.
keys() ->
    eqc_gen:non_empty(list(eqc_gen:non_empty(binary()))).

%% Generator: a non-empty list of (possibly empty) binary values.
values() ->
    eqc_gen:non_empty(list(binary())).

%% Generator: a single {Op, Key, Value} operation tuple, where Op is
%% put or delete, drawn from the supplied key and value pools.
ops(Keys, Values) ->
    {oneof([put, delete]), oneof(Keys), oneof(Values)}.
%% Replay a list of {put|delete, Key, Value} operations against Tbl via
%% SRef while maintaining a pure orddict model of the expected store
%% state.  Deleting a key that was never written counts as success
%% (generated sequences may delete before the first put); deleted keys
%% are recorded in the model as the atom 'deleted'.  Any other delete
%% result crashes via badmatch, failing the property.
apply_kv_ops([], _SRef, _Tbl, Model) ->
    Model;
apply_kv_ops([{put, K, V} | Rest], SRef, Tbl, Model) ->
    ok = wt:session_put(SRef, Tbl, K, V),
    apply_kv_ops(Rest, SRef, Tbl, orddict:store(K, V, Model));
apply_kv_ops([{delete, K, _} | Rest], SRef, Tbl, Model) ->
    ok = case wt:session_delete(SRef, Tbl, K) of
             ok ->
                 ok;
             not_found ->
                 %% Key was never written (or already deleted): fine.
                 ok;
             Unexpected ->
                 Unexpected
         end,
    apply_kv_ops(Rest, SRef, Tbl, orddict:store(K, deleted, Model)).
%% QuickCheck property: after replaying an arbitrary non-empty sequence
%% of put/delete operations, every key the model marks 'deleted' reads
%% back as not_found, and every live key reads back its last written
%% value.
prop_put_delete() ->
    ?LET({Keys, Values}, {keys(), values()},
         ?FORALL(Ops, eqc_gen:non_empty(list(ops(Keys, Values))),
                 begin
                     DataDir = "/tmp/wt.putdelete.qc",
                     Table = "table:eqc",
                     %% Start every generated case from an empty store.
                     ?cmd("rm -rf "++DataDir),
                     ok = filelib:ensure_dir(filename:join(DataDir, "x")),
                     Cfg = wt:config_to_bin([{create,true}]),
                     {ok, Conn} = wt:conn_open(DataDir, Cfg),
                     {ok, SRef} = wt:session_open(Conn),
                     try
                         wt:session_create(SRef, Table),
                         Model = apply_kv_ops(Ops, SRef, Table, []),
                         %% Validate that all deleted values return not_found
                         F = fun({K, deleted}) ->
                                     ?assertEqual(not_found, wt:session_get(SRef, Table, K));
                                ({K, V}) ->
                                     ?assertEqual({ok, V}, wt:session_get(SRef, Table, K))
                             end,
                         lists:map(F, Model),
                         true
                     after
                         %% Always release the session and connection,
                         %% even when an assertion above fails.
                         wt:session_close(SRef),
                         wt:conn_close(Conn)
                     end
                 end)).
%% EUnit wrapper: run the put/delete property with a 3-minute timeout,
%% since each QuickCheck case opens a fresh store under /tmp.
prop_put_delete_test_() ->
    {timeout, 3*60, fun() -> qc(prop_put_delete()) end}.
-endif.
-endif.

View file

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% wterl: access to WiredTiger database
%% wt: access to WiredTiger database
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
@ -19,7 +19,7 @@
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wterl_app).
-module(wt_app).
-author('Steve Vinoski <steve@basho.com>').
-behaviour(application).
@ -33,7 +33,7 @@
%% ===================================================================
start(_StartType, _StartArgs) ->
wterl_sup:start_link().
wt_sup:start_link().
stop(_State) ->
ok.

View file

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% wterl_conn: manage a connection to WiredTiger
%% wt_conn: manage a connection to WiredTiger
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
@ -19,24 +19,27 @@
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wterl_conn).
-module(wt_conn).
-author('Steve Vinoski <steve@basho.com>').
-behaviour(gen_server).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
-endif.
%% API
-export([start_link/0, stop/0,
open/1, open/2, open/3, is_open/0, get/0, close/1]).
open/1, open/2, is_open/0, get/0, close/1]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-record(state, { conn :: wterl:connection() }).
-record(state, {
conn :: wt:connection()
}).
-type config_list() :: [{atom(), any()}].
@ -52,15 +55,13 @@ start_link() ->
stop() ->
gen_server:cast(?MODULE, stop).
-spec open(string()) -> {ok, wterl:connection()} | {error, term()}.
-spec open(string(), config_list()) -> {ok, wterl:connection()} | {error, term()}.
-spec open(string(), config_list(), config_list()) -> {ok, wterl:connection()} | {error, term()}.
-spec open(string()) -> {ok, wt:connection()} | {error, term()}.
open(Dir) ->
open(Dir, [], []).
open(Dir, ConnectionConfig) ->
gen_server:call(?MODULE, {open, Dir, ConnectionConfig, [], self()}, infinity).
open(Dir, ConnectionConfig, SessionConfig) ->
gen_server:call(?MODULE, {open, Dir, ConnectionConfig, SessionConfig, self()}, infinity).
open(Dir, []).
-spec open(string(), config_list()) -> {ok, wt:connection()} | {error, term()}.
open(Dir, Config) ->
gen_server:call(?MODULE, {open, Dir, Config, self()}, infinity).
-spec is_open() -> boolean().
is_open() ->
@ -70,7 +71,7 @@ is_open() ->
get() ->
gen_server:call(?MODULE, get, infinity).
-spec close(wterl:connection()) -> ok.
-spec close(wt:connection()) -> ok.
close(_Conn) ->
gen_server:call(?MODULE, {close, self()}, infinity).
@ -79,23 +80,24 @@ close(_Conn) ->
%% ====================================================================
init([]) ->
true = wterl_ets:table_ready(),
true = wt_conn_deputy:table_ready(),
{ok, #state{}}.
handle_call({open, Dir, ConnectionConfig, SessionConfig, Caller}, _From, #state{conn=undefined}=State) ->
handle_call({open, Dir, Config, Caller}, _From, #state{conn=undefined}=State) ->
Opts = tailor_config(Config),
{Reply, NState} =
case wterl:connection_open(Dir, ConnectionConfig, SessionConfig) of
{ok, ConnRef}=OK ->
Monitor = erlang:monitor(process, Caller),
true = ets:insert(wterl_ets, {Monitor, Caller}),
{OK, State#state{conn = ConnRef}};
Error ->
{Error, State}
end,
case wt:conn_open(Dir, wt:config_to_bin(Opts)) of
{ok, ConnRef}=OK ->
Monitor = erlang:monitor(process, Caller),
true = ets:insert(wt_conn_deputy, {Monitor, Caller}),
{OK, State#state{conn = ConnRef}};
Error ->
{Error, State}
end,
{reply, Reply, NState};
handle_call({open, _Dir, _ConnectionConfig, _SessionConfig, Caller}, _From, #state{conn=ConnRef}=State) ->
handle_call({open, _Dir, _Config, Caller}, _From,#state{conn=ConnRef}=State) ->
Monitor = erlang:monitor(process, Caller),
true = ets:insert(wterl_ets, {Monitor, Caller}),
true = ets:insert(wt_conn_deputy, {Monitor, Caller}),
{reply, {ok, ConnRef}, State};
handle_call(is_open, _From, #state{conn=ConnRef}=State) ->
@ -107,10 +109,10 @@ handle_call(get, _From, #state{conn=ConnRef}=State) ->
{reply, {ok, ConnRef}, State};
handle_call({close, Caller}, _From, #state{conn=ConnRef}=State) ->
{[{Monitor, Caller}], _} = ets:match_object(wterl_ets, {'_', Caller}, 1),
{[{Monitor, Caller}], _} = ets:match_object(wt_conn_deputy, {'_', Caller}, 1),
true = erlang:demonitor(Monitor, [flush]),
true = ets:delete(wterl_ets, Monitor),
NState = case ets:info(wterl_ets, size) of
true = ets:delete(wt_conn_deputy, Monitor),
NState = case ets:info(wt_conn_deputy, size) of
0 ->
do_close(ConnRef),
State#state{conn=undefined};
@ -127,17 +129,17 @@ handle_cast(stop, #state{conn=ConnRef}=State) ->
do_close(ConnRef),
ets:foldl(fun({Monitor, _}, _) ->
true = erl:demonitor(Monitor, [flush]),
ets:delete(wterl_ets, Monitor)
end, true, wterl_ets),
ets:delete(wt_conn_deputy, Monitor)
end, true, wt_conn_deputy),
{stop, normal, State#state{conn=undefined}};
handle_cast(_Msg, State) ->
{noreply, State}.
handle_info({'DOWN', Monitor, _, _, _}, #state{conn=ConnRef}=State) ->
NState = case ets:lookup(wterl_ets, Monitor) of
NState = case ets:lookup(wt_conn_deputy, Monitor) of
[{Monitor, _}] ->
true = ets:delete(wterl_ets, Monitor),
case ets:info(wterl_ets, size) of
true = ets:delete(wt_conn_deputy, Monitor),
case ets:info(wt_conn_deputy, size) of
0 ->
do_close(ConnRef),
State#state{conn=undefined};
@ -166,12 +168,45 @@ code_change(_OldVsn, State, _Extra) ->
do_close(undefined) ->
ok;
do_close(ConnRef) ->
wterl:connection_close(ConnRef).
wt:conn_close(ConnRef).
%% @private
config_value(Key, Config, Default) ->
{Key, app_helper:get_prop_or_env(Key, Config, wt, Default)}.
%% @private
map_cfg([], Acc) ->
Acc;
map_cfg([Fun|T], Acc) ->
map_cfg(T, Fun(Acc)).
tailor_config(Config) ->
map_cfg([fun (Acc) ->
case proplists:is_defined(create, Acc) of
false -> [{create, true} | Acc];
true -> Acc
end
end,
fun (Acc) ->
case proplists:is_defined(shared_cache, Acc) of
false ->
[config_value(cache_size, Acc, "512MB") | Acc];
true ->
Acc
end
end,
fun (Acc) ->
case proplists:is_defined(session_max, Acc) of
false ->
[config_value(session_max, Acc, 100) | Acc];
true ->
Acc
end
end], Config).
-ifdef(TEST).
-define(DATADIR, "test/wterl-backend").
-define(DATADIR, "test/wt-backend").
simple_test_() ->
{spawn,
@ -179,7 +214,7 @@ simple_test_() ->
fun() ->
?assertCmd("rm -rf " ++ ?DATADIR),
?assertMatch(ok, filelib:ensure_dir(filename:join(?DATADIR, "x"))),
EtsPid = case wterl_ets:start_link() of
EtsPid = case wt_conn_deputy:start_link() of
{ok, Pid1} ->
Pid1;
{error, {already_started, Pid1}} ->
@ -195,7 +230,7 @@ simple_test_() ->
end,
fun(_) ->
stop(),
wterl_ets:stop()
wt_conn_deputy:stop()
end,
fun(_) ->
{inorder,
@ -209,14 +244,14 @@ simple_test_() ->
end}]}.
open_one() ->
{ok, Ref} = open("test/wterl-backend", [{create, true}, {session_max, 20},{cache_size, "1MB"}]),
{ok, Ref} = open("test/wt-backend", [{create, true},{session_max, 20}]),
true = is_open(),
close(Ref),
false = is_open(),
ok.
open_and_wait(Pid) ->
{ok, Ref} = open("test/wterl-backend", [{create, true}]),
{ok, Ref} = open("test/wt-backend", [{create, true}]),
Pid ! open,
receive
close ->

View file

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% wterl_ets: ets table owner for wterl_conn
%% wt_conn_deputy: ets table owner for wt_conn
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
@ -19,17 +19,17 @@
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wterl_ets).
-module(wt_conn_deputy).
-author('Steve Vinoski <steve@basho.com>').
-behaviour(gen_server).
%% ====================================================================
%% The sole purpose of this module is to own the ets table used by the
%% wterl_conn module. Holding the ets table in an otherwise do-nothing
%% wt_conn module. Holding the ets table in an otherwise do-nothing
%% server avoids losing the table and its contents should an unexpected
%% error occur in wterl_conn if it were the owner instead. This module
%% is unit-tested as part of the wterl_conn module.
%% error occur in wt_conn if it were the owner instead. This module
%% is unit-tested as part of the wt_conn module.
%% ====================================================================
%% API

View file

@ -1,6 +1,6 @@
%% -------------------------------------------------------------------
%%
%% wterl_sup: supervisor for WiredTiger database app
%% wt_sup: supervisor for WiredTiger database app
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
@ -19,7 +19,7 @@
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wterl_sup).
-module(wt_sup).
-author('Steve Vinoski <steve@basho.com>').
-behaviour(supervisor).
@ -45,6 +45,5 @@ start_link() ->
%% ===================================================================
init([]) ->
{ok, {{one_for_one, 5, 10}, [?CHILD(wterl_ets, worker),
?CHILD(wterl_conn, worker),
?CHILD(wterl_event_handler, worker)]}}.
{ok, {{one_for_one, 5, 10}, [?CHILD(wt_conn_deputy, worker),
?CHILD(wt_conn, worker)]}}.

View file

@ -1,962 +0,0 @@
%% -------------------------------------------------------------------
%%
%% wterl: Erlang Wrapper for WiredTiger
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wterl).
-export([connection_open/2,
connection_open/3,
connection_close/1,
cursor_close/1,
cursor_insert/3,
cursor_next/1,
cursor_next_key/1,
cursor_next_value/1,
cursor_open/2,
cursor_open/3,
cursor_prev/1,
cursor_prev_key/1,
cursor_prev_value/1,
cursor_remove/2,
cursor_reset/1,
cursor_search/2,
cursor_search/3,
cursor_search_near/2,
cursor_search_near/3,
cursor_update/3,
checkpoint/1,
checkpoint/2,
create/2,
create/3,
delete/3,
drop/2,
drop/3,
get/3,
put/4,
rename/3,
rename/4,
salvage/2,
salvage/3,
truncate/2,
truncate/3,
truncate/4,
truncate/5,
upgrade/2,
upgrade/3,
verify/2,
verify/3,
config_value/3,
priv_dir/0,
fold_keys/3,
fold/3]).
-export([set_event_handler_pid/1]).
-ifdef(TEST).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
-define(QC_OUT(P), eqc:on_output(fun(Str, Args) -> io:format(user, Str, Args) end, P)).
-endif.
-include_lib("eunit/include/eunit.hrl").
-endif.
-type config() :: binary().
-type config_list() :: [{atom(), any()}].
-opaque connection() :: reference().
-opaque cursor() :: reference().
-type key() :: binary().
-type value() :: binary().
-export_type([connection/0, cursor/0]).
-on_load(init/0).
-include("async_nif.hrl").
-define(nif_stub, nif_stub_error(?LINE)).
nif_stub_error(Line) ->
erlang:nif_error({nif_not_loaded,module,?MODULE,line,Line}).
-spec init() -> ok | {error, any()}.
init() ->
erlang:load_nif(filename:join([priv_dir(), atom_to_list(?MODULE)]),
[{wterl_vsn, "942e51b"},
{wiredtiger_vsn, "1.6.4-275-g9c44420"}]). %% TODO automate these
-spec connection_open(string(), config_list()) -> {ok, connection()} | {error, term()}.
-spec connection_open(string(), config_list(), config_list()) -> {ok, connection()} | {error, term()}.
connection_open(HomeDir, ConnectionConfig) ->
connection_open(HomeDir, ConnectionConfig, []).
connection_open(HomeDir, ConnectionConfig, SessionConfig) ->
PrivDir = wterl:priv_dir(),
{ok, PrivFiles} = file:list_dir(PrivDir),
SoFiles =
lists:filter(fun(Elem) ->
case re:run(Elem, "^libwiredtiger_.*\.so$") of
{match, _} -> true;
nomatch -> false
end
end, PrivFiles),
SoPaths = lists:map(fun(Elem) -> filename:join([PrivDir, Elem]) end, SoFiles),
conn_open(HomeDir, [{extensions, SoPaths}] ++ ConnectionConfig, SessionConfig).
-spec conn_open(string(), config_list(), config_list()) -> {ok, connection()} | {error, term()}.
conn_open(HomeDir, ConnectionConfig, SessionConfig) ->
?ASYNC_NIF_CALL(fun conn_open_nif/4, [HomeDir,
config_to_bin(ConnectionConfig),
config_to_bin(SessionConfig)]).
-spec conn_open_nif(reference(), string(), config(), config()) -> {ok, connection()} | {error, term()}.
conn_open_nif(_AsyncRef, _HomeDir, _ConnectionConfig, _SessionConfig) ->
?nif_stub.
-spec connection_close(connection()) -> ok | {error, term()}.
connection_close(ConnRef) ->
?ASYNC_NIF_CALL(fun conn_close_nif/2, [ConnRef]).
-spec conn_close_nif(reference(), connection()) -> ok | {error, term()}.
conn_close_nif(_AsyncRef, _ConnRef) ->
?nif_stub.
-spec create(connection(), string()) -> ok | {error, term()}.
-spec create(connection(), string(), config_list()) -> ok | {error, term()}.
create(Ref, Name) ->
create(Ref, Name, []).
create(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun create_nif/4, [Ref, Name, config_to_bin(Config)]).
-spec create_nif(reference(), connection(), string(), config()) -> ok | {error, term()}.
create_nif(_AsyncNif, _Ref, _Name, _Config) ->
?nif_stub.
-spec drop(connection(), string()) -> ok | {error, term()}.
-spec drop(connection(), string(), config_list()) -> ok | {error, term()}.
drop(Ref, Name) ->
drop(Ref, Name, [{force, true}]).
drop(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun drop_nif/4, [Ref, Name, config_to_bin(Config)]).
-spec drop_nif(reference(), connection(), string(), config()) -> ok | {error, term()}.
drop_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec delete(connection(), string(), key()) -> ok | {error, term()}.
delete(Ref, Table, Key) ->
?ASYNC_NIF_CALL(fun delete_nif/4, [Ref, Table, Key]).
-spec delete_nif(reference(), connection(), string(), key()) -> ok | {error, term()}.
delete_nif(_AsyncRef, _Ref, _Table, _Key) ->
?nif_stub.
-spec get(connection(), string(), key()) -> {ok, value()} | not_found | {error, term()}.
get(Ref, Table, Key) ->
?ASYNC_NIF_CALL(fun get_nif/4, [Ref, Table, Key]).
-spec get_nif(reference(), connection(), string(), key()) -> {ok, value()} | not_found | {error, term()}.
get_nif(_AsyncRef, _Ref, _Table, _Key) ->
?nif_stub.
-spec put(connection(), string(), key(), value()) -> ok | {error, term()}.
put(Ref, Table, Key, Value) ->
?ASYNC_NIF_CALL(fun put_nif/5, [Ref, Table, Key, Value]).
-spec put_nif(reference(), connection(), string(), key(), value()) -> ok | {error, term()}.
put_nif(_AsyncRef, _Ref, _Table, _Key, _Value) ->
?nif_stub.
-spec rename(connection(), string(), string()) -> ok | {error, term()}.
-spec rename(connection(), string(), string(), config_list()) -> ok | {error, term()}.
rename(Ref, OldName, NewName) ->
rename(Ref, OldName, NewName, []).
rename(Ref, OldName, NewName, Config) ->
?ASYNC_NIF_CALL(fun rename_nif/5, [Ref, OldName, NewName, config_to_bin(Config)]).
-spec rename_nif(reference(), connection(), string(), string(), config()) -> ok | {error, term()}.
rename_nif(_AsyncRef, _Ref, _OldName, _NewName, _Config) ->
?nif_stub.
-spec salvage(connection(), string()) -> ok | {error, term()}.
-spec salvage(connection(), string(), config_list()) -> ok | {error, term()}.
salvage(Ref, Name) ->
salvage(Ref, Name, []).
salvage(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun salvage_nif/4, [Ref, Name, config_to_bin(Config)]).
-spec salvage_nif(reference(), connection(), string(), config()) -> ok | {error, term()}.
salvage_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec checkpoint(connection()) -> ok | {error, term()}.
-spec checkpoint(connection(), config_list()) -> ok | {error, term()}.
checkpoint(_Ref) ->
checkpoint(_Ref, []).
checkpoint(Ref, Config) ->
?ASYNC_NIF_CALL(fun checkpoint_nif/3, [Ref, config_to_bin(Config)]).
-spec checkpoint_nif(reference(), connection(), config()) -> ok | {error, term()}.
checkpoint_nif(_AsyncRef, _Ref, _Config) ->
?nif_stub.
-spec truncate(connection(), string()) -> ok | {error, term()}.
-spec truncate(connection(), string(), config_list()) -> ok | {error, term()}.
-spec truncate(connection(), string(), binary() | first, binary() | last) -> ok | {error, term()}.
-spec truncate(connection(), string(), binary() | first, binary() | last, config()) -> ok | {error, term()}.
truncate(Ref, Name) ->
truncate(Ref, Name, first, last, []).
truncate(Ref, Name, Config) ->
truncate(Ref, Name, first, last, Config).
truncate(Ref, Name, Start, Stop) ->
truncate(Ref, Name, Start, Stop, []).
truncate(Ref, Name, Start, Stop, Config) ->
?ASYNC_NIF_CALL(fun truncate_nif/6, [Ref, Name, Start, Stop, config_to_bin(Config)]).
-spec truncate_nif(reference(), connection(), string(), cursor() | first, cursor() | last, config()) -> ok | {error, term()}.
truncate_nif(_AsyncRef, _Ref, _Name, _Start, _Stop, _Config) ->
?nif_stub.
-spec upgrade(connection(), string()) -> ok | {error, term()}.
-spec upgrade(connection(), string(), config_list()) -> ok | {error, term()}.
upgrade(Ref, Name) ->
upgrade(Ref, Name, []).
upgrade(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun upgrade_nif/4, [Ref, Name, config_to_bin(Config)]).
-spec upgrade_nif(reference(), connection(), string(), config()) -> ok | {error, term()}.
upgrade_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec verify(connection(), string()) -> ok | {error, term()}.
-spec verify(connection(), string(), config_list()) -> ok | {error, term()}.
verify(Ref, Name) ->
verify(Ref, Name, []).
verify(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun verify_nif/4, [Ref, Name, config_to_bin(Config)]).
-spec verify_nif(reference(), connection(), string(), config()) -> ok | {error, term()}.
verify_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec cursor_open(connection(), string()) -> {ok, cursor()} | {error, term()}.
-spec cursor_open(connection(), string(), config_list()) -> {ok, cursor()} | {error, term()}.
cursor_open(Ref, Table) ->
cursor_open(Ref, Table, []).
cursor_open(Ref, Table, Config) ->
?ASYNC_NIF_CALL(fun cursor_open_nif/4, [Ref, Table, config_to_bin(Config)]).
-spec cursor_open_nif(reference(), connection(), string(), config()) -> {ok, cursor()} | {error, term()}.
cursor_open_nif(_AsyncRef, _Ref, _Table, _Config) ->
?nif_stub.
-spec cursor_close(cursor()) -> ok | {error, term()}.
cursor_close(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_close_nif/2, [Cursor]).
-spec cursor_close_nif(reference(), cursor()) -> ok | {error, term()}.
cursor_close_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_next(cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_next(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_next_nif/2, [Cursor]).
-spec cursor_next_nif(reference(), cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_next_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_next_key(cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_next_key(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_next_key_nif/2, [Cursor]).
-spec cursor_next_key_nif(reference(), cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_next_key_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_next_value(cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_next_value(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_next_value_nif/2, [Cursor]).
-spec cursor_next_value_nif(reference(), cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_next_value_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_prev(cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_prev(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_prev_nif/2, [Cursor]).
-spec cursor_prev_nif(reference(), cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_prev_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_prev_key(cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_prev_key(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_prev_key_nif/2, [Cursor]).
-spec cursor_prev_key_nif(reference(), cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_prev_key_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_prev_value(cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_prev_value(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_prev_value_nif/2, [Cursor]).
-spec cursor_prev_value_nif(reference(), cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_prev_value_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_search(cursor(), key()) -> {ok, value()} | {error, term()}.
-spec cursor_search(cursor(), key(), boolean()) -> {ok, value()} | {error, term()}.
cursor_search(Cursor, Key) ->
?ASYNC_NIF_CALL(fun cursor_search_nif/4, [Cursor, Key, false]).
cursor_search(Cursor, Key, Scanning) when is_boolean(Scanning) ->
?ASYNC_NIF_CALL(fun cursor_search_nif/4, [Cursor, Key, Scanning]).
-spec cursor_search_nif(reference(), cursor(), key(), boolean()) -> {ok, value()} | {error, term()}.
cursor_search_nif(_AsyncRef, _Cursor, _Key, _Scanning) ->
?nif_stub.
-spec cursor_search_near(cursor(), key()) -> {ok, value()} | {error, term()}.
-spec cursor_search_near(cursor(), key(), boolean()) -> {ok, value()} | {error, term()}.
cursor_search_near(Cursor, Key) ->
?ASYNC_NIF_CALL(fun cursor_search_near_nif/4, [Cursor, Key, false]).
cursor_search_near(Cursor, Key, Scanning) when is_boolean(Scanning) ->
?ASYNC_NIF_CALL(fun cursor_search_near_nif/4, [Cursor, Key, Scanning]).
-spec cursor_search_near_nif(reference(), cursor(), key(), boolean()) -> {ok, value()} | {error, term()}.
cursor_search_near_nif(_AsyncRef, _Cursor, _Key, _Scanning) ->
?nif_stub.
-spec cursor_reset(cursor()) -> ok | {error, term()}.
cursor_reset(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_reset_nif/2, [Cursor]).
-spec cursor_reset_nif(reference(), cursor()) -> ok | {error, term()}.
cursor_reset_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_insert(cursor(), key(), value()) -> ok | {error, term()}.
cursor_insert(Cursor, Key, Value) ->
?ASYNC_NIF_CALL(fun cursor_insert_nif/4, [Cursor, Key, Value]).
-spec cursor_insert_nif(reference(), cursor(), key(), value()) -> ok | {error, term()}.
cursor_insert_nif(_AsyncRef, _Cursor, _Key, _Value) ->
?nif_stub.
-spec cursor_update(cursor(), key(), value()) -> ok | {error, term()}.
cursor_update(Cursor, Key, Value) ->
?ASYNC_NIF_CALL(fun cursor_update_nif/4, [Cursor, Key, Value]).
-spec cursor_update_nif(reference(), cursor(), key(), value()) -> ok | {error, term()}.
cursor_update_nif(_AsyncRef, _Cursor, _Key, _Value) ->
?nif_stub.
-spec cursor_remove(cursor(), key()) -> ok | {error, term()}.
cursor_remove(Cursor, Key) ->
?ASYNC_NIF_CALL(fun cursor_remove_nif/3, [Cursor, Key]).
-spec cursor_remove_nif(reference(), cursor(), key()) -> ok | {error, term()}.
cursor_remove_nif(_AsyncRef, _Cursor, _Key) ->
?nif_stub.
-type fold_keys_fun() :: fun((Key::binary(), any()) -> any()).
-spec fold_keys(cursor(), fold_keys_fun(), any()) -> any().
fold_keys(Cursor, Fun, Acc0) ->
fold_keys(Cursor, Fun, Acc0, cursor_next_key(Cursor)).
fold_keys(_Cursor, _Fun, Acc, not_found) ->
Acc;
fold_keys(Cursor, Fun, Acc, {ok, Key}) ->
fold_keys(Cursor, Fun, Fun(Key, Acc), cursor_next_key(Cursor)).
-type fold_fun() :: fun(({Key::binary(), Value::binary()}, any()) -> any()).
-spec fold(cursor(), fold_fun(), any()) -> any().
fold(Cursor, Fun, Acc0) ->
fold(Cursor, Fun, Acc0, cursor_next(Cursor)).
fold(_Cursor, _Fun, Acc, not_found) ->
Acc;
fold(Cursor, Fun, Acc, {ok, Key, Value}) ->
fold(Cursor, Fun, Fun({Key, Value}, Acc), cursor_next(Cursor)).
priv_dir() ->
case code:priv_dir(?MODULE) of
{error, bad_name} ->
EbinDir = filename:dirname(code:which(?MODULE)),
AppPath = filename:dirname(EbinDir),
filename:join([AppPath, "priv"]);
Path ->
Path
end.
%%
%% Configuration information.
%%
config_value(Key, Config, Default) ->
{Key, app_helper:get_prop_or_env(Key, Config, wterl, Default)}.
%% @private Table mapping each supported wterl/WiredTiger option key to
%% the encoder type understood by config_encode/2.  Keys not listed here
%% are skipped (with a logged error) by config_to_bin/2.
config_types() ->
    [{block_compressor, {string, quoted}},
     {bloom_bit_count, integer},
     {bloom_config, config},
     {bloom_hash_count, integer},
     {bloom_newest, bool},
     {bloom_oldest, bool},
     {cache_size, string},
     {checkpoint, config},
     {checkpoint_sync, bool},
     {checksum, string},
     {chunk_size, string},
     {create, bool},
     {direct_io, list},
     {drop, list},
     {enabled, bool},
     {error_prefix, string},
     {eviction_target, integer},
     {eviction_trigger, integer},
     {extensions, {list, quoted}},
     {statistics_fast, bool},
     {file_max, string},
     {force, bool},
     {from, string},
     {hazard_max, integer},
     {home_environment, bool},
     {home_environment_priv, bool},
     {internal_page_max, string},
     {isolation, string},
     {key_type, string},
     {leaf_page_max, string},
     {log, config},
     {lsm, config},
     {mmap, bool},
     {merge_threads, integer},
     {multiprocess, bool},
     {name, string},
     {overwrite, bool},
     {prefix_compression, bool},
     {raw, bool},
     {session_max, integer},
     {statistics, list},
     {statistics_log, config},
     {target, {list, quoted}},
     {to, string},
     {transaction_sync, string},
     {transactional, bool},
     {verbose, list},
     {wait, integer}].

%% @private Encode one option value as a binary fragment of WiredTiger's
%% configuration-string syntax.  Returns the atom `invalid' when the
%% value cannot be represented for the given type.
config_encode(integer, Value) ->
    try
        list_to_binary(integer_to_list(Value))
    catch
        _:_ ->
            invalid
    end;
config_encode(config, Value) ->
    %% Nested option list, e.g. {checkpoint, [{wait, 30}]} -> "(wait=30,)".
    list_to_binary(["(", config_to_bin(Value, []), ")"]);
config_encode(list, Value) ->
    list_to_binary(["(", string:join(Value, ","), ")"]);
config_encode({list, quoted}, Value) ->
    Quoted = ["\"" ++ S ++ "\"" || S <- Value],
    list_to_binary(["(", string:join(Quoted, ","), ")"]);
config_encode(string, Value) when is_list(Value) ->
    list_to_binary(Value);
config_encode({string, quoted}, Value) when is_list(Value) ->
    list_to_binary("\"" ++ Value ++ "\"");
config_encode(string, Value) when is_integer(Value) ->
    list_to_binary(integer_to_list(Value));
config_encode(string, Value) when is_float(Value) ->
    %% BUGFIX: the previous clause guarded on is_number/1 but always
    %% applied integer_to_list/1, so a float crashed with badarg instead
    %% of being encoded.
    list_to_binary(float_to_list(Value));
config_encode(bool, true) ->
    <<"true">>;
config_encode(bool, Value) when is_number(Value) andalso Value =/= 0 ->
    <<"true">>;
config_encode(bool, "true") ->
    <<"true">>;
config_encode(bool, false) ->
    <<"false">>;
config_encode(bool, 0) ->
    <<"false">>;
config_encode(bool, "false") ->
    <<"false">>;
config_encode(_Type, _Value) ->
    invalid.

%% @doc Convert a proplist of options into a NUL-terminated WiredTiger
%% configuration string, e.g. [{create, true}] -> <<"create=true,\0">>.
%% Unknown keys and unencodable values are skipped with a logged error
%% rather than aborting the whole conversion.
-spec config_to_bin([{atom(), any()}]) -> binary().
config_to_bin(Opts) ->
    iolist_to_binary([config_to_bin(Opts, []), <<"\0">>]).

%% @private Accumulator worker.  NOTE: encoded fragments are *prepended*
%% to Acc, so options are emitted in reverse of their input order; this
%% long-standing behavior is deliberately preserved (it affects which
%% duplicate option WiredTiger sees last).
config_to_bin([], Acc) ->
    iolist_to_binary(Acc);
config_to_bin([{Key, Value} | Rest], Acc) ->
    case lists:keyfind(Key, 1, config_types()) of
        {Key, Type} ->
            Acc2 =
                case config_encode(Type, Value) of
                    invalid ->
                        error_logger:error_msg("Skipping invalid option ~p = ~p\n", [Key, Value]),
                        Acc;
                    EncodedValue ->
                        EncodedKey = atom_to_binary(Key, utf8),
                        [EncodedKey, <<"=">>, EncodedValue, <<",">> | Acc]
                end,
            config_to_bin(Rest, Acc2);
        false ->
            error_logger:error_msg("Skipping unknown option ~p = ~p\n", [Key, Value]),
            config_to_bin(Rest, Acc)
    end.
%% @doc Register Pid as the process that receives WiredTiger event
%% messages from the NIF layer (the wterl_event_handler gen_server
%% handles {error, _}, {message, _} and {progress, _} messages).  The
%% real implementation is in C; this body is only the ?nif_stub
%% placeholder hit when the NIF library failed to load.
-spec set_event_handler_pid(pid()) -> ok.
set_event_handler_pid(Pid)
  when is_pid(Pid) ->
    ?nif_stub.
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
-define(TEST_DATA_DIR, "test/wterl.basic").
%% Open a test connection in DataDir with default options (create the
%% home, 1GB cache, 8192 sessions), wiping any state left by a prior run.
open_test_conn(DataDir) ->
    open_test_conn(DataDir, [{create,true},{cache_size,"1GB"},{session_max, 8192}]).

%% Same, with caller-supplied connection options.  Returns the ConnRef.
open_test_conn(DataDir, OpenConfig) ->
    {ok, CWD} = file:get_cwd(),
    %% Blow away the data directory so every test starts from scratch.
    rmdir:path(filename:join([CWD, DataDir])), %?cmd("rm -rf " ++ filename:join([CWD, DataDir])),
    %% ensure_dir only creates parents, hence the dummy "x" leaf.
    ?assertMatch(ok, filelib:ensure_dir(filename:join([DataDir, "x"]))),
    {ok, ConnRef} = connection_open(filename:join([CWD, DataDir]), OpenConfig),
    ConnRef.
%% Create the table "<Type>:test" on ConnRef; Type defaults to "table"
%% (btree) and Opts to [].  Returns ConnRef so calls chain in tests.
open_test_table(ConnRef) ->
    open_test_table(ConnRef, "table", []).

open_test_table(ConnRef, Type) ->
    open_test_table(ConnRef, Type, []).

open_test_table(ConnRef, Type, Opts) ->
    ?assertMatch(ok, create(ConnRef, Type ++ ":test", Opts)),
    ConnRef.
%% EUnit fixture: opens one connection for the whole run, then exercises
%% basic table lifecycle (create/verify/drop) for btree, lsm and
%% snappy-compressed btree tables.
conn_test_() ->
    {setup,
     fun() ->
             open_test_conn(?TEST_DATA_DIR)
     end,
     fun(ConnRef) ->
             ok = connection_close(ConnRef)
     end,
     fun(ConnRef) ->
             {inorder,
              [{"open and close a connection",
                %% NOTE(review): despite the label, this case creates the
                %% default table on the already-open connection.
                fun() ->
                        ConnRef = open_test_table(ConnRef)
                end},
               {"create, verify, drop a table(btree)",
                fun() ->
                        wterl:create(ConnRef, "table:test", []),
                        ?assertMatch(ok, verify(ConnRef, "table:test")),
                        ?assertMatch(ok, drop(ConnRef, "table:test"))
                end},
               {"create, test verify, drop a table(lsm)",
                fun() ->
                        ConnRef = open_test_table(ConnRef, "lsm"),
                        ?assertMatch(ok, verify(ConnRef, "lsm:test")),
                        ?assertMatch(ok, drop(ConnRef, "lsm:test"))
                end},
               {"create, verify, drop a table(btree, snappy)",
                fun() ->
                        ConnRef = open_test_table(ConnRef, "table", [{block_compressor, "snappy"}]),
                        ?assertMatch(ok, verify(ConnRef, "table:test")),
                        ?assertMatch(ok, drop(ConnRef, "table:test"))
                end}
              ]}
     end}.
%% Round-trip a single key: put, read back, delete, then confirm the
%% key now reads not_found.
insert_delete_test() ->
    ConnRef = open_test_conn(?TEST_DATA_DIR),
    ConnRef = open_test_table(ConnRef),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"a">>, <<"apple">>)),
    ?assertMatch({ok, <<"apple">>}, get(ConnRef, "table:test", <<"a">>)),
    ?assertMatch(ok, delete(ConnRef, "table:test", <<"a">>)),
    ?assertMatch(not_found, get(ConnRef, "table:test", <<"a">>)),
    ok = connection_close(ConnRef).
%% cursor_fold_keys_test() ->
%% ConnRef = open_test_conn(?TEST_DATA_DIR),
%% ConnRef = open_test_table(ConnRef),
%% [wterl:put(ConnRef, "table:test-fold", crypto:sha(<<X>>),
%% crypto:rand_bytes(crypto:rand_uniform(128, 4096)))
%% || X <- lists:seq(1, 2000)],
%% Cursor = wterl:cursor_open(ConnRef, "table:test-fold"),
%% try
%% {Result, _} = wterl:fold_keys(Cursor, fun(Key, Acc) -> [Key | Acc] end, [])
%% catch
%% _:_ -> wterl:cursor_close(Cursor)
%% after
%% ok = connection_close(ConnRef)
%% end.
%% ?assertMatch(lists:sort(Result),
%% lists:sort([crypto:sha(<<X>>) || X <- lists:seq(1, 2000)])).
%% Stress test: spawn one worker per LSM table (lists:seq(0, NumTables)
%% is inclusive, so NumTables + 1 = 17 tables/workers), each doing N
%% puts, gets and deletes against its own table over the shared
%% connection, then drop every table.  120s timeout: LSM table setup
%% and merge activity make this slow.
many_open_tables_test_() ->
    {timeout, 120,
     fun() ->
             ConnOpts = [{create,true},{cache_size,"1GB"},{session_max, 8192}],
             DataDir = ?TEST_DATA_DIR,
             KeyGen =
                 fun(X) ->
                         crypto:hash(sha, <<X>>)
                 end,
             ValGen =
                 fun() ->
                         crypto:rand_bytes(crypto:rand_uniform(128, 4096))
                 end,
             TableNameGen =
                 fun(X) ->
                         "lsm:" ++ integer_to_list(X)
                 end,
             NumTables = 16, N = 100,
             ConnRef = open_test_conn(DataDir, ConnOpts),
             Parent = self(),
             [ok = wterl:create(ConnRef, TableNameGen(X), [{checksum, "uncompressed"}]) || X <- lists:seq(0, NumTables)],
             [spawn(fun() ->
                            TableName = TableNameGen(X),
                            %% enoent is only reported, not fatal: the point is
                            %% to surface missing-table races, not crash workers.
                            [case wterl:put(ConnRef, TableName, KeyGen(P), ValGen()) of
                                 ok -> ok;
                                 {error, {enoent, _}} -> io:format("put failed, table missing ~p~n", [TableName])
                             end || P <- lists:seq(1, N)],
                            [case wterl:get(ConnRef, TableName, KeyGen(P)) of
                                 {ok, _} -> ok;
                                 {error, {enoent, _}} -> io:format("get failed, table missing ~p~n", [TableName])
                             end || P <- lists:seq(1, N)],
                            [case wterl:delete(ConnRef, TableName, KeyGen(P)) of
                                 ok -> ok;
                                 {error, {enoent, _}} -> io:format("delete failed, table missing ~p~n", [TableName])
                             end || P <- lists:seq(1, N)],
                            Parent ! done
                    end) || X <- lists:seq(0, NumTables)],
             %% Wait for every worker before dropping tables.
             [receive done -> ok end || _ <- lists:seq(0, NumTables)],
             [case wterl:drop(ConnRef, TableNameGen(X)) of
                  ok -> ok;
                  {error, {enoent, _}} -> io:format("drop failed, table missing ~p~n", [TableNameGen(X)])
              end || X <- lists:seq(0, NumTables)],
             ok = wterl:connection_close(ConnRef)
     end}.
%% Open a fresh connection + default table and load the fixture rows.
init_test_table() ->
    ConnRef = open_test_conn(?TEST_DATA_DIR),
    ConnRef = open_test_table(ConnRef),
    populate_test_table(ConnRef).

%% Insert the canonical a..g fruit fixture into "table:test"; returns
%% ConnRef for chaining.
populate_test_table(ConnRef) ->
    ?assertMatch(ok, put(ConnRef, "table:test", <<"a">>, <<"apple">>)),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"b">>, <<"banana">>)),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"c">>, <<"cherry">>)),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"d">>, <<"date">>)),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"e">>, <<"elephant">>)),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"f">>, <<"forest">>)),
    ?assertMatch(ok, put(ConnRef, "table:test", <<"g">>, <<"gooseberry">>)),
    ConnRef.

%% Fixture teardown: close the connection opened by init_test_table/0.
stop_test_table(ConnRef) ->
    ?assertMatch(ok, connection_close(ConnRef)).
%% EUnit fixture for online operations on a populated table: checkpoint,
%% truncate and drop.  The range-truncate cases are commented out (they
%% predate the current truncate API -- left here by the original author).
various_online_test_() ->
    {setup,
     fun init_test_table/0,
     fun stop_test_table/1,
     fun(ConnRef) ->
             {inorder,
              [
               {"checkpoint",
                fun() ->
                        ?assertMatch(ok, checkpoint(ConnRef, [{target, ["table:test"]}])),
                        ?assertMatch({ok, <<"apple">>}, get(ConnRef, "table:test", <<"a">>))
                end},
               {"truncate entire table",
                fun() ->
                        ?assertMatch(ok, truncate(ConnRef, "table:test")),
                        ?assertMatch(not_found, get(ConnRef, "table:test", <<"a">>))
                end},
               %% {"truncate range [<<b>>..last], ensure value outside range is found after",
               %%  fun() ->
               %%          ?assertMatch(ok, truncate(ConnRef, "table:test", <<"b">>, last)),
               %%          ?assertMatch({ok, <<"apple">>}, get(ConnRef, "table:test", <<"a">>))
               %%  end},
               %% {"truncate range [first..<<b>>], ensure value inside range is not_found after",
               %%  fun() ->
               %%          ?assertMatch(ok, truncate(ConnRef, "table:test", first, <<"b">>)),
               %%          ?assertMatch(not_found, get(ConnRef, "table:test", <<"a">>))
               %%  end},
               %% {"truncate range [first..not_found] with a key that doesn't exist",
               %%  fun() ->
               %%          ?assertMatch(not_found, truncate(ConnRef, "table:test", first, <<"z">>))
               %%  end},
               %% {"truncate range [not_found..last] with a key that doesn't exist",
               %%  fun() ->
               %%          ?assertMatch(not_found, truncate(ConnRef, "table:test", <<"0">>, last))
               %%  end},
               %% {"truncate range [not_found..not_found] with keys that don't exist",
               %%  fun() ->
               %%          ?assertMatch(not_found, truncate(ConnRef, "table:test", <<"0">>, <<"0">>))
               %%  end},
               %% {"truncate range [<<b>...<<f>>], ensure value before & after range still exist",
               %%  fun() ->
               %%          ?assertMatch(ok, truncate(ConnRef, "table:test", <<"b">>, <<"f">>)),
               %%          ?assertMatch({ok, <<"apple">>}, get(ConnRef, "table:test", <<"a">>)),
               %%          ?assertMatch(not_found, get(ConnRef, "table:test", <<"b">>)),
               %%          ?assertMatch(not_found, get(ConnRef, "table:test", <<"c">>)),
               %%          ?assertMatch(not_found, get(ConnRef, "table:test", <<"d">>)),
               %%          ?assertMatch(not_found, get(ConnRef, "table:test", <<"e">>)),
               %%          ?assertMatch(not_found, get(ConnRef, "table:test", <<"f">>)),
               %%          ?assertMatch({ok, <<"gooseberry">>}, get(ConnRef, "table:test", <<"g">>))
               %%  end},
               {"drop table",
                fun() ->
                        ?assertMatch(ok, drop(ConnRef, "table:test"))
                end}
              ]}
     end}.
%% EUnit fixture for offline maintenance operations: drop/recreate,
%% salvage, upgrade and rename of a table on a bare connection (no
%% fixture data is loaded -- these operate on table metadata/files).
various_maintenance_test_() ->
    {setup,
     fun () ->
             {ok, CWD} = file:get_cwd(),
             ?assertMatch(ok, filelib:ensure_dir(filename:join([?TEST_DATA_DIR, "x"]))),
             {ok, ConnRef} = connection_open(filename:join([CWD, ?TEST_DATA_DIR]), [{create,true}]),
             ConnRef
     end,
     fun (ConnRef) ->
             ?assertMatch(ok, connection_close(ConnRef))
     end,
     fun(ConnRef) ->
             {inorder,
              [
               {"drop table",
                fun() ->
                        ?assertMatch(ok, create(ConnRef, "table:test")),
                        ?assertMatch(ok, drop(ConnRef, "table:test")),
                        ?assertMatch(ok, create(ConnRef, "table:test"))
                end},
               {"salvage",
                fun() ->
                        ?assertMatch(ok, salvage(ConnRef, "table:test"))
                end},
               {"upgrade",
                fun() ->
                        ?assertMatch(ok, upgrade(ConnRef, "table:test"))
                end},
               {"rename",
                fun() ->
                        ?assertMatch(ok, rename(ConnRef, "table:test", "table:new")),
                        ?assertMatch(ok, rename(ConnRef, "table:new", "table:test"))
                end}
              ]}
     end}.
%% Open a cursor, step forward from the start; open a second cursor,
%% step backward from the end; close both cleanly.
cursor_open_close_test() ->
    ConnRef = init_test_table(),
    {ok, Cursor1} = cursor_open(ConnRef, "table:test"),
    ?assertMatch({ok, <<"a">>, <<"apple">>}, cursor_next(Cursor1)),
    ?assertMatch(ok, cursor_close(Cursor1)),
    {ok, Cursor2} = cursor_open(ConnRef, "table:test"),
    ?assertMatch({ok, <<"g">>, <<"gooseberry">>}, cursor_prev(Cursor2)),
    ?assertMatch(ok, cursor_close(Cursor2)),
    stop_test_table(ConnRef).
%% EUnit fixture covering cursor traversal (next/prev over keys, values
%% and pairs), fold_keys, exact and proximity search (search_near),
%% reset, and cursor-based insert/update/remove.
%% FIX: three test descriptions misspelled "proximity" as "proxmity".
various_cursor_test_() ->
    {setup,
     fun init_test_table/0,
     fun stop_test_table/1,
     fun(ConnRef) ->
             {inorder,
              [{"move a cursor back and forth, getting key",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, <<"a">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"b">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"c">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"d">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"c">>}, cursor_prev_key(Cursor)),
                        ?assertMatch({ok, <<"d">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"e">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"f">>}, cursor_next_key(Cursor)),
                        ?assertMatch({ok, <<"g">>}, cursor_next_key(Cursor)),
                        ?assertMatch(not_found, cursor_next_key(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"move a cursor back and forth, getting value",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, <<"apple">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"banana">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"cherry">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"date">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"cherry">>}, cursor_prev_value(Cursor)),
                        ?assertMatch({ok, <<"date">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"elephant">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"forest">>}, cursor_next_value(Cursor)),
                        ?assertMatch({ok, <<"gooseberry">>}, cursor_next_value(Cursor)),
                        ?assertMatch(not_found, cursor_next_value(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"move a cursor back and forth, getting key and value",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, <<"a">>, <<"apple">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"b">>, <<"banana">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"c">>, <<"cherry">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"d">>, <<"date">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"c">>, <<"cherry">>}, cursor_prev(Cursor)),
                        ?assertMatch({ok, <<"d">>, <<"date">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"e">>, <<"elephant">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"f">>, <<"forest">>}, cursor_next(Cursor)),
                        ?assertMatch({ok, <<"g">>, <<"gooseberry">>}, cursor_next(Cursor)),
                        ?assertMatch(not_found, cursor_next(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"fold keys",
                %% fold_keys prepends, so the result comes back reversed.
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch([<<"g">>, <<"f">>, <<"e">>, <<"d">>, <<"c">>, <<"b">>, <<"a">>],
                                     fold_keys(Cursor, fun(Key, Acc) -> [Key | Acc] end, [])),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"search for an item",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, <<"banana">>}, cursor_search(Cursor, <<"b">>)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"proximity search for an item, and find it",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, match}, cursor_search_near(Cursor, <<"e">>)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"proximity search for an item, find next smallest key",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, lt}, cursor_search_near(Cursor, <<"z">>)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"proximity search for an item, find next largest key",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, gt}, cursor_search_near(Cursor, <<"0">>)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"check cursor reset",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch({ok, <<"apple">>}, cursor_next_value(Cursor)),
                        ?assertMatch(ok, cursor_reset(Cursor)),
                        ?assertMatch({ok, <<"apple">>}, cursor_next_value(Cursor)),
                        ?assertMatch(ok, cursor_close(Cursor))
                end},
               {"insert/overwrite an item using a cursor",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test"),
                        ?assertMatch(ok, cursor_insert(Cursor, <<"h">>, <<"huckleberry">>)),
                        ?assertMatch({ok, <<"huckleberry">>}, cursor_search(Cursor, <<"h">>)),
                        ?assertMatch(ok, cursor_insert(Cursor, <<"g">>, <<"grapefruit">>)),
                        ?assertMatch({ok, <<"grapefruit">>}, cursor_search(Cursor, <<"g">>)),
                        ?assertMatch(ok, cursor_close(Cursor)),
                        ?assertMatch({ok, <<"grapefruit">>}, get(ConnRef, "table:test", <<"g">>)),
                        ?assertMatch({ok, <<"huckleberry">>}, get(ConnRef, "table:test", <<"h">>))
                end},
               {"update an item using a cursor",
                %% overwrite=false makes update of a missing key report
                %% not_found instead of inserting.
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test", [{overwrite, false}, {raw,true}]),
                        ?assertMatch(ok, cursor_update(Cursor, <<"g">>, <<"goji berries">>)),
                        ?assertMatch(not_found, cursor_update(Cursor, <<"k">>, <<"kumquat">>)),
                        ?assertMatch(ok, cursor_close(Cursor)),
                        ?assertMatch({ok, <<"goji berries">>}, get(ConnRef, "table:test", <<"g">>))
                end},
               {"remove an item using a cursor",
                fun() ->
                        {ok, Cursor} = cursor_open(ConnRef, "table:test", [{overwrite, false}, {raw,true}]),
                        ?assertMatch(ok, cursor_remove(Cursor, <<"g">>)),
                        ?assertMatch(not_found, cursor_remove(Cursor, <<"l">>)),
                        ?assertMatch(ok, cursor_close(Cursor)),
                        ?assertMatch(not_found, get(ConnRef, "table:test", <<"g">>))
                end}]}
     end}.
-ifdef(EQC).
%% Run an EQC property and assert that it holds.
qc(P) ->
    ?assert(eqc:quickcheck(?QC_OUT(P))).

%% Generator: non-empty list of non-empty binary keys.
keys() ->
    eqc_gen:non_empty(list(eqc_gen:non_empty(binary()))).

%% Generator: non-empty list of (possibly empty) binary values.
values() ->
    eqc_gen:non_empty(list(binary())).

%% Generator: one random put/delete operation drawn from Keys/Values.
ops(Keys, Values) ->
    {oneof([put, delete]), oneof(Keys), oneof(Values)}.
%% Apply a list of {put | delete, Key, Value} operations against table
%% Tbl on ConnRef, while maintaining a model orddict recording the last
%% write per key (value, or the atom `deleted').  Crashes (badmatch) on
%% any unexpected store result.
apply_kv_ops([], _ConnRef, _Tbl, Model) ->
    Model;
apply_kv_ops([{put, K, V} | Rest], ConnRef, Tbl, Model) ->
    ok = wterl:put(ConnRef, Tbl, K, V),
    apply_kv_ops(Rest, ConnRef, Tbl, orddict:store(K, V, Model));
apply_kv_ops([{delete, K, _V} | Rest], ConnRef, Tbl, Model) ->
    %% Deleting an absent key is fine; anything else must be ok.
    Result = case wterl:delete(ConnRef, Tbl, K) of
                 ok ->
                     ok;
                 not_found ->
                     ok;
                 Other ->
                     Other
             end,
    ok = Result,
    apply_kv_ops(Rest, ConnRef, Tbl, orddict:store(K, deleted, Model)).
%% Property: after applying a random sequence of put/delete operations,
%% every key the model marks `deleted' reads back not_found and every
%% live key reads back its last-written value.
prop_put_delete() ->
    ?LET({Keys, Values}, {keys(), values()},
         ?FORALL(Ops, eqc_gen:non_empty(list(ops(Keys, Values))),
                 begin
                     DataDir = "test/wterl.putdelete.qc",
                     Table = "table:eqc",
                     {ok, CWD} = file:get_cwd(),
                     %% start each run from an empty data directory
                     rmdir:path(filename:join([CWD, DataDir])), % ?cmd("rm -rf " ++ filename:join([CWD, DataDir])),
                     ok = filelib:ensure_dir(filename:join([DataDir, "x"])),
                     {ok, ConnRef} = wterl:connection_open(DataDir, [{create,true}]),
                     try
                         wterl:create(ConnRef, Table),
                         Model = apply_kv_ops(Ops, ConnRef, Table, []),
                         %% Validate that all deleted values return not_found
                         F = fun({K, deleted}) ->
                                     ?assertEqual(not_found, wterl:get(ConnRef, Table, K));
                                ({K, V}) ->
                                     ?assertEqual({ok, V}, wterl:get(ConnRef, Table, K))
                             end,
                         lists:map(F, Model),
                         true
                     after
                         wterl:connection_close(ConnRef)
                     end
                 end)).

%% EUnit wrapper running the QuickCheck property with a 3-minute budget.
prop_put_delete_test_() ->
    {timeout, 3*60, fun() -> qc(prop_put_delete()) end}.
-endif.
-endif.

View file

@ -1,111 +0,0 @@
%% -------------------------------------------------------------------
%%
%% wterl: Erlang Wrapper for WiredTiger
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-module(wterl_event_handler).
-behaviour(gen_server).
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.
%% API
-export([start_link/0, stop/0]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
-define(PREFIX, "wiredtiger").
%% ====================================================================
%% API
%% ====================================================================
%% @doc Start the event handler gen_server, registered locally as
%% ?MODULE; init/1 registers it with the NIF as the event sink.
-spec start_link() -> {ok, pid()} | {error, term()}.
start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Ask the event handler to stop (asynchronous cast).
-spec stop() -> ok.
stop() ->
    gen_server:cast(?MODULE, stop).
%% ====================================================================
%% gen_server callbacks
%% ====================================================================
%% gen_server init: register this process with the NIF so WiredTiger
%% events arrive here as Erlang messages (handled in handle_info/2).
init([]) ->
    wterl:set_event_handler_pid(self()),
    {ok, []}.

%% No synchronous API; reply ok to anything.
handle_call(_Msg, _From, State) ->
    {reply, ok, State}.

handle_cast(stop, State) ->
    {stop, normal, State};
handle_cast(_Msg, State) ->
    {noreply, State}.

%% Messages forwarded by the NIF event callback: errors, informational
%% messages and progress reports; anything else is silently ignored.
handle_info({error, {Errno, Message}}, State) ->
    log(error, "~s: (~s) ~s", [?PREFIX, Errno, Message]),
    {noreply, State};
handle_info({message, Info}, State) ->
    log(info, "~s: ~s", [?PREFIX, Info]),
    {noreply, State};
handle_info({progress, {Operation, Counter}}, State) ->
    log(info, "~s: progress on ~s [~b]", [?PREFIX, Operation, Counter]),
    {noreply, State};
handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% ====================================================================
%% Internal functions
%% ====================================================================
%% @private Emit a log message, routed to lager when the lager
%% application is currently running, otherwise to standard output.
-spec log(error | info, string(), [any()]) -> ok.
log(Urgency, Format, Args) ->
    Backend =
        case proplists:is_defined(lager, application:which_applications()) of
            true  -> lager;
            false -> stdio
        end,
    log(Backend, Urgency, Format, Args).

%% @private Backend-specific emit; stdio ignores the urgency level.
-spec log(lager | stdio, error | info, string(), [any()]) -> ok.
log(lager, error, Format, Args) ->
    lager:error(Format, Args);
log(lager, info, Format, Args) ->
    lager:info(Format, Args);
log(stdio, _, Format, Args) ->
    io:format(Format ++ "~n", Args).
%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).
-endif.

View file

@ -1,91 +0,0 @@
-module(basho_bench_driver_wterl).
-record(state, { connection, uri }).
-export([new/1,
run/4]).
-include_lib("basho_bench/include/basho_bench.hrl").
%% ====================================================================
%% API
%% ====================================================================
%% basho_bench worker constructor.  Worker 1 additionally verifies the
%% wterl code is on the code path and starts the shared wterl
%% supervisor; every other worker goes straight to setup/1.
new(1) ->
    %% Make sure wterl is available
    case code:which(wterl) of
        non_existing ->
            ?FAIL_MSG("~s requires wterl to be available on code path.\n",
                      [?MODULE]);
        _ ->
            ok
    end,
    {ok, _} = wterl_sup:start_link(),
    setup(1);
new(Id) ->
    setup(Id).
%% Open (or reuse) the single shared WiredTiger connection, ensure the
%% benchmark table exists, and return this worker's driver state.
setup(Id) ->
    %% Get the target directory
    Dir = basho_bench_config:get(wterl_dir, "/tmp"),
    Config = basho_bench_config:get(wterl, []),
    Uri = config_value(table_uri, Config, "lsm:test"),
    ConnectionOpts = config_value(connection, Config, [{create,true},{session_max, 8192}]),
    SessionOpts = config_value(session, Config, []),
    TableOpts = config_value(table, Config, []),
    %% Start WiredTiger
    Connection =
        case wterl_conn:is_open() of
            false ->
                %% First worker to get here opens the shared connection;
                case wterl_conn:open(Dir, ConnectionOpts, SessionOpts) of
                    {ok, Conn} ->
                        Conn;
                    {error, Reason0} ->
                        ?FAIL_MSG("Failed to establish a WiredTiger connection for ~p, wterl backend unable to start: ~p\n", [Id, Reason0])
                end;
            true ->
                %% later workers share the already-open connection.
                {ok, Conn} = wterl_conn:get(),
                Conn
        end,
    case wterl:create(Connection, Uri, TableOpts) of
        ok ->
            {ok, #state{connection=Connection, uri=Uri}};
        {error, Reason} ->
            {error, Reason}
    end.
%% basho_bench operation callback: get/put/delete against the table at
%% Uri.  not_found is treated as success for get and delete (the
%% benchmark's key generator may produce keys never written).
run(get, KeyGen, _ValueGen, #state{connection=Connection, uri=Uri}=State) ->
    case wterl:get(Connection, Uri, KeyGen()) of
        {ok, _Value} ->
            {ok, State};
        not_found ->
            {ok, State};
        {error, Reason} ->
            {error, Reason}
    end;
run(put, KeyGen, ValueGen, #state{connection=Connection, uri=Uri}=State) ->
    case wterl:put(Connection, Uri, KeyGen(), ValueGen()) of
        ok ->
            {ok, State};
        {error, Reason} ->
            {error, Reason}
    end;
run(delete, KeyGen, _ValueGen, #state{connection=Connection, uri=Uri}=State) ->
    case wterl:delete(Connection, Uri, KeyGen()) of
        ok ->
            {ok, State};
        not_found ->
            {ok, State};
        {error, Reason} ->
            {error, Reason}
    end.
%% Fetch Key from the benchmark config proplist, returning Default when
%% the key is absent (or its stored value is the atom `undefined').
config_value(Key, Config, Default) ->
    Found = proplists:get_value(Key, Config),
    case Found of
        undefined -> Default;
        _ -> Found
    end.

View file

@ -1,102 +0,0 @@
%%-*- mode: erlang -*-
%% ex: ft=erlang ts=4 sw=4 et
%% How to:
%% * put the wterl-b_b.config file into basho_bench/examples
%% * put the basho_bench_driver_wterl.erl into basho_bench/src
%% * make clean in basho_bench, then make
%% * edit examples/wterl-b_b.config
%% - change {code_paths, ["../wterl"]}. to be a relative path to your
%% wterl directory
%% - change {wterl_dir, "/home/gburd/ws/basho_bench/data"}. to a fully
%% qualified location for your test data files (mkdir that directory
%% yourself, if it doesn't exist the test will fail 'enoent')
%% * to run, replace this path with the proper path on your system:
%% LD_LIBRARY_PATH=/home/you/wterl/priv ./basho_bench examples/wterl-b_b.config
%% * the test should run for 10 minutes (as it is configured right now)
%% with 4 concurrent workers accessing the same table
%%
%% Note:
%% There are two config sections in wt.config {wterl, [ ... ]}. and
%% {wterl_, [ ... ]}. The one being used is named "wterl" the other
%% config is ignored. I set up an LSM and a BTREE config; to choose
%% which is run you just rename those two sections (turn one off by
%% adding a "_" to the name and take the "_" out of the other's name).
{mode, max}.
{duration, 10}.
{concurrent, 16}.
{report_interval, 1}.
{pb_timeout_general, 1000}. % ms
%{pb_timeout_read, ?}.
%{pb_timeout_write, ?}.
%{pb_timeout_listkeys, ?}.
%{pb_timeout_mapreduce, ?}.
{driver, basho_bench_driver_wterl}.
{key_generator, {int_to_bin_littleendian,{uniform_int, 5000000}}}.
{value_generator, {fixed_bin, 10000}}.
{operations, [{get, 4}, {put, 4}, {delete, 2}]}.
{code_paths, ["../wterl"]}.
{wterl_dir, "/home/gburd/ws/basho_bench/data"}.
%% lsm
{wterl, [
{connection, [
{create, true},
{session_sync, false},
{transaction_sync, "none"},
{log, [{enabled, false}]},
{session_max, 1024},
{cache_size, 4294967296},
{verbose, []},
% "salvage", "verify" are okay, however...
% for some unknown reason, if you add these additional
% verbose flags Erlang SEGV's "size_object: bad tag for 0x80"
% no idea why... yet... you've been warned.
%"block", "shared_cache", "reconcile", "evict", "lsm",
%"fileops", "read", "write", "readserver", "evictserver",
%"hazard", "mutex", "ckpt"
{statistics_log, [{wait, 30}]}
]},
{session, [ {isolation, "snapshot"} ]},
{table_uri, "lsm:test"},
{lsm_merge_threads, 2},
{table, [
{internal_page_max, "128K"},
{leaf_page_max, "128K"},
{lsm_chunk_size, "25MB"},
{lsm_bloom_newest, true},
{lsm_bloom_oldest, true} ,
{lsm_bloom_bit_count, 128},
{lsm_bloom_hash_count, 64},
{lsm_bloom_config, [{leaf_page_max, "8MB"}]},
{block_compressor, "snappy"} % bzip2
]}
]}.
%% btree
{wterl_, [
{connection, [
{create, true},
{session_sync, false},
{transaction_sync, "none"},
{log, [{enabled, false}]},
{session_max, 1024},
{cache_size, 4294967296},
{verbose, []},
% "salvage", "verify" are okay, however...
% for some unknown reason, if you add these additional
% verbose flags Erlang SEGV's "size_object: bad tag for 0x80"
% no idea why... yet... you've been warned.
%"block", "shared_cache", "reconcile", "evict", "lsm",
%"fileops", "read", "write", "readserver", "evictserver",
%"hazard", "mutex", "ckpt"
{statistics_log, [{wait, 30}]},
{checkpoint, [{await, 10}]}
]},
{session, [ {isolation, "snapshot"} ]},
{table_uri, "table:test"},
{table, [
{block_compressor, "snappy"} % bzip2
]}
]}.

View file

@ -1,10 +0,0 @@
#!/bin/sh -
# Print the git-describe version of wterl and of the bundled WiredTiger
# source tree (one per line).
# Note: also, remember to update version numbers in rpath specs so that
# shared libs can be found at runtime!!!
wterl=$(git describe --always --long --tags)
wiredtiger0=$(cd c_src/wiredtiger-[0-9.]* && git describe --always --long --tags)
# NOTE(review): git describe emits a single token, so awk's $2 is
# normally empty here -- confirm the intended field before relying on it.
wiredtiger=$(echo "$wiredtiger0" | awk '{print $2}')
echo "$wterl"
echo "$wiredtiger"