Use the async NIF technique to avoid blocking the BEAM's schedulers #1

Closed
gburd wants to merge 15 commits from gsb-async-nifs into master
5 changed files with 1964 additions and 533 deletions
Showing only changes of commit eacc8514ac

c_src/async_nif.h (new file, 305 lines)

@@ -0,0 +1,305 @@
/*
*
* async_nif: An async thread-pool layer for Erlang's NIF API
*
* Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
* Author: Gregory Burd <greg@basho.com> <greg@burd.me>
*
* This file is provided to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
#ifndef __ASYNC_NIF_H__
#define __ASYNC_NIF_H__
#if defined(__cplusplus)
extern "C" {
#endif
/* Redefine this in your NIF implementation before including this file to
change the thread pool size. The maximum number of threads may be
bounded by your OS. For instance, to allow 1,000,000 threads on a Linux
system, run the following before launching the process:
echo 1000000 > /proc/sys/kernel/threads-max
On all UNIX systems, ulimit maximums will also apply. */
#ifndef ANIF_MAX_WORKERS
#define ANIF_MAX_WORKERS 64
#endif
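/* For example, a NIF that wants a larger pool could define the value before
the include (illustrative sketch; 128 is an arbitrary choice):
#define ANIF_MAX_WORKERS 128
#include "async_nif.h"
*/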
#ifndef __offsetof
#define __offsetof(st, m) \
((size_t) ( (char *)&((st *)0)->m - (char *)0 ))
#endif
#include "queue.h"
struct anif_req_entry {
ERL_NIF_TERM pid;
ErlNifEnv *env;
ERL_NIF_TERM ref, *argv;
void *args;
void *priv_data;
void (*fn_work)(ErlNifEnv*, ERL_NIF_TERM, void *, ErlNifPid*, void *);
void (*fn_post)(void *);
STAILQ_ENTRY(anif_req_entry) entries;
};
STAILQ_HEAD(reqs, anif_req_entry) anif_reqs = STAILQ_HEAD_INITIALIZER(anif_reqs);
struct anif_worker_entry {
ErlNifTid tid;
LIST_ENTRY(anif_worker_entry) entries;
};
LIST_HEAD(idle_workers, anif_worker_entry) anif_idle_workers = LIST_HEAD_INITIALIZER(anif_idle_workers);
static volatile unsigned int anif_req_count = 0;
static volatile unsigned int anif_shutdown = 0;
static ErlNifMutex *anif_req_mutex = NULL;
static ErlNifMutex *anif_worker_mutex = NULL;
static ErlNifCond *anif_cnd = NULL;
static struct anif_worker_entry anif_worker_entries[ANIF_MAX_WORKERS];
#define ASYNC_NIF_DECL(decl, frame, pre_block, work_block, post_block) \
struct decl ## _args frame; \
static void fn_work_ ## decl (ErlNifEnv *env, ERL_NIF_TERM ref, void *priv_data, ErlNifPid *pid, struct decl ## _args *args) \
work_block \
static void fn_post_ ## decl (struct decl ## _args *args) { \
do post_block while(0); \
} \
static ERL_NIF_TERM decl(ErlNifEnv* env_in, int argc_in, const ERL_NIF_TERM argv_in[]) { \
struct decl ## _args on_stack_args; \
struct decl ## _args *args = &on_stack_args; \
struct decl ## _args *copy_of_args; \
struct anif_req_entry *req = NULL; \
ErlNifPid pid_in; \
ErlNifEnv *env = NULL; \
ERL_NIF_TERM ref; \
ERL_NIF_TERM *argv = NULL; \
int __i = 0, argc = argc_in - 1; \
enif_self(env_in, &pid_in); \
if (anif_shutdown) { \
enif_send(NULL, &pid_in, env_in, \
enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "shutdown"))); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "shutdown")); \
} \
env = enif_alloc_env(); \
if (!env) \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
ref = enif_make_copy(env, argv_in[0]); \
if (!ref) { \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
copy_of_args = (struct decl ## _args *)enif_alloc(sizeof(struct decl ## _args)); \
if (!copy_of_args) { \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
argv = (ERL_NIF_TERM *)enif_alloc((sizeof(ERL_NIF_TERM) * argc)); \
if (!argv) { \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
req = (struct anif_req_entry*)enif_alloc(sizeof(struct anif_req_entry)); \
if (!req) { \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
for (__i = 0; __i < argc; __i++) { \
argv[__i] = enif_make_copy(env, argv_in[(__i + 1)]); \
if (!argv[__i]) { \
enif_free(req); \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
} \
do pre_block while(0); \
memcpy(copy_of_args, args, sizeof(struct decl ## _args)); \
req->ref = ref; \
req->pid = enif_make_pid(env, &pid_in); \
req->args = (void*)copy_of_args; \
req->argv = argv; \
req->env = env; \
req->priv_data = enif_priv_data(env_in); \
req->fn_work = (void (*)(ErlNifEnv *, ERL_NIF_TERM, void*, ErlNifPid*, void *))fn_work_ ## decl ; \
req->fn_post = (void (*)(void *))fn_post_ ## decl; \
anif_enqueue_req(req); \
return enif_make_tuple2(env, enif_make_atom(env, "ok"), \
enif_make_int(env, anif_req_count)); \
}
#define ASYNC_NIF_LOAD() if (anif_init() != 0) return -1;
#define ASYNC_NIF_UNLOAD() anif_unload();
#define ASYNC_NIF_UPGRADE() anif_unload();
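/* Illustrative sketch (hypothetical callback names, not part of this header):
the including NIF invokes these from its erl_nif load/unload/upgrade hooks.
static int on_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
{
ASYNC_NIF_LOAD();   // starts the worker pool; makes on_load return -1 on failure
return 0;
}
ASYNC_NIF_UNLOAD() and ASYNC_NIF_UPGRADE() are called the same way from the
unload and upgrade callbacks registered with ERL_NIF_INIT. */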
#define ASYNC_NIF_PRE_ENV() env_in
#define ASYNC_NIF_PRE_RETURN_CLEANUP() \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free(req); \
enif_free_env(env);
#define ASYNC_NIF_RETURN_BADARG() ASYNC_NIF_PRE_RETURN_CLEANUP(); return enif_make_badarg(env_in);
#define ASYNC_NIF_REPLY(msg) enif_send(NULL, pid, env, enif_make_tuple2(env, ref, msg))
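/* Usage sketch (hypothetical consumer code; the NIF name and argument
unpacking are illustrative): a NIF that adds two integers off the
scheduler threads. The Erlang caller passes a reference as the first
argument (see ASYNC_NIF_CALL in src/async_nif.hrl); the user arguments
follow it and arrive here, already copied, as argv[0..argc-1].
ASYNC_NIF_DECL(
my_add_nif,
{ // struct my_add_nif_args: request state handed to the worker thread
long a;
long b;
},
{ // pre: runs on the scheduler thread; validate and unpack the arguments
if (!enif_get_long(env, argv[0], &args->a) ||
!enif_get_long(env, argv[1], &args->b)) {
ASYNC_NIF_RETURN_BADARG();
}
},
{ // work: runs on a pool thread; send the reply back to the caller
ASYNC_NIF_REPLY(enif_make_long(env, args->a + args->b));
},
{ // post: free anything allocated in the pre block (nothing here)
})
On the Erlang side this would be called as
?ASYNC_NIF_CALL(fun my_add_nif/3, [A, B]). */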
static void anif_enqueue_req(struct anif_req_entry *r)
{
/* Add the request to the work queue. */
enif_mutex_lock(anif_req_mutex);
STAILQ_INSERT_TAIL(&anif_reqs, r, entries);
anif_req_count++;
enif_mutex_unlock(anif_req_mutex);
enif_cond_broadcast(anif_cnd);
}
static void *anif_worker_fn(void *arg)
{
struct anif_worker_entry *worker = (struct anif_worker_entry *)arg;
struct anif_req_entry *req = NULL;
/*
* Workers are active while there is work on the queue to do, and are on
* the idle list only while they are waiting for new work.
*/
for(;;) {
/* Examine the request queue, are there things to be done? */
enif_mutex_lock(anif_req_mutex);
enif_mutex_lock(anif_worker_mutex);
LIST_INSERT_HEAD(&anif_idle_workers, worker, entries);
enif_mutex_unlock(anif_worker_mutex);
check_again_for_work:
if (anif_shutdown) { enif_mutex_unlock(anif_req_mutex); break; }
if ((req = STAILQ_FIRST(&anif_reqs)) == NULL) {
/* Queue is empty, join the list of idle workers and wait for work */
enif_cond_wait(anif_cnd, anif_req_mutex);
goto check_again_for_work;
} else {
/* `req` is our work request and we hold the lock. */
enif_cond_broadcast(anif_cnd);
/* Take the request off the queue. */
STAILQ_REMOVE(&anif_reqs, req, anif_req_entry, entries); anif_req_count--;
/* Now we need to remove this thread from the list of idle threads. */
enif_mutex_lock(anif_worker_mutex);
LIST_REMOVE(worker, entries);
/* Release the locks in the reverse order from which we acquired them,
so as not to self-deadlock. */
enif_mutex_unlock(anif_worker_mutex);
enif_mutex_unlock(anif_req_mutex);
/* Finally, let's do the work! :) */
ErlNifPid pid;
enif_get_local_pid(req->env, req->pid, &pid);
req->fn_work(req->env, req->ref, req->priv_data, &pid, req->args);
req->fn_post(req->args);
enif_free(req->args);
enif_free(req->argv);
enif_free_env(req->env);
enif_free(req);
}
}
enif_thread_exit(0);
return 0;
}
static void anif_unload(void)
{
unsigned int i;
/* Signal the worker threads to stop what they're doing and exit. */
enif_mutex_lock(anif_req_mutex);
anif_shutdown = 1;
enif_cond_broadcast(anif_cnd);
enif_mutex_unlock(anif_req_mutex);
/* Join the now-exiting worker threads. */
for (i = 0; i < ANIF_MAX_WORKERS; ++i) {
void *exit_value = 0; /* Ignore this. */
enif_thread_join(anif_worker_entries[i].tid, &exit_value);
}
/* We won't get here until all threads have exited.
Patch things up, and carry on. */
enif_mutex_lock(anif_req_mutex);
/* Worker threads are stopped, now toss anything left in the queue. */
struct anif_req_entry *req = NULL;
while ((req = STAILQ_FIRST(&anif_reqs)) != NULL) {
STAILQ_REMOVE_HEAD(&anif_reqs, entries);
ErlNifPid pid;
enif_get_local_pid(req->env, req->pid, &pid);
/* Tag the message with the request's reference so the caller's receive
in ASYNC_NIF_CALL can match it. */
enif_send(NULL, &pid, req->env,
enif_make_tuple2(req->env, req->ref,
enif_make_tuple2(req->env, enif_make_atom(req->env, "error"),
enif_make_atom(req->env, "shutdown"))));
req->fn_post(req->args);
enif_free(req->args);
enif_free(req->argv);
enif_free_env(req->env);
enif_free(req);
anif_req_count--;
}
enif_mutex_unlock(anif_req_mutex);
enif_cond_destroy(anif_cnd);
/* Not strictly necessary. */
memset(anif_worker_entries, 0, sizeof(struct anif_worker_entry) * ANIF_MAX_WORKERS);
enif_mutex_destroy(anif_req_mutex); anif_req_mutex = NULL;
enif_mutex_destroy(anif_worker_mutex); anif_worker_mutex = NULL;
}
static int anif_init(void)
{
unsigned int i;
/* Don't init more than once. */
if (anif_req_mutex) return 0;
anif_req_mutex = enif_mutex_create(NULL);
anif_worker_mutex = enif_mutex_create(NULL);
anif_cnd = enif_cond_create(NULL);
/* Setup the requests management. */
anif_req_count = 0;
/* Setup the thread pool management. */
enif_mutex_lock(anif_worker_mutex);
for (i = 0; i < ANIF_MAX_WORKERS; i++) {
enif_thread_create(NULL, &anif_worker_entries[i].tid,
&anif_worker_fn, (void*)&anif_worker_entries[i], NULL);
}
enif_mutex_unlock(anif_worker_mutex);
return 0;
}
#if defined(__cplusplus)
}
#endif
#endif // __ASYNC_NIF_H__

c_src/queue.h (new file, 570 lines)

@@ -0,0 +1,570 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
* $FreeBSD: src/sys/sys/queue.h,v 1.54 2002/08/05 05:18:43 alfred Exp $
*/
#ifndef _DB_QUEUE_H_
#define _DB_QUEUE_H_
#if defined(__cplusplus)
extern "C" {
#endif
/*
* This file defines four types of data structures: singly-linked lists,
* singly-linked tail queues, lists and tail queues.
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A singly-linked tail queue is headed by a pair of pointers, one to the
* head of the list and the other to the tail of the list. The elements are
* singly linked for minimum space and pointer manipulation overhead at the
* expense of O(n) removal for arbitrary elements. New elements can be added
* to the list after an existing element, at the head of the list, or at the
* end of the list. Elements being removed from the head of the tail queue
* should use the explicit macro for this purpose for optimum efficiency.
* A singly-linked tail queue may only be traversed in the forward direction.
* Singly-linked tail queues are ideal for applications with large datasets
* and few or no removals or for implementing a FIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* For details on the use of these macros, see the queue(3) manual page.
*
*
* SLIST LIST STAILQ TAILQ
* _HEAD + + + +
* _HEAD_INITIALIZER + + + +
* _ENTRY + + + +
* _INIT + + + +
* _EMPTY + + + +
* _FIRST + + + +
* _NEXT + + + +
* _PREV - - - +
* _LAST - - + +
* _FOREACH + + + +
* _FOREACH_REVERSE - - - +
* _INSERT_HEAD + + + +
* _INSERT_BEFORE - + - +
* _INSERT_AFTER + + + +
* _INSERT_TAIL - - + +
* _CONCAT - - + +
* _REMOVE_HEAD + - + -
* _REMOVE + + + +
*
*/
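/*
* In this change, async_nif.h uses STAILQ for its FIFO of pending requests
* and LIST for the set of idle worker threads. A minimal STAILQ sketch
* (names are illustrative):
*
* struct req { int work; STAILQ_ENTRY(req) entries; };
* STAILQ_HEAD(req_queue, req) pending = STAILQ_HEAD_INITIALIZER(pending);
*
* struct req *r = enif_alloc(sizeof(struct req)); // or malloc() outside a NIF
* STAILQ_INSERT_TAIL(&pending, r, entries);       // enqueue at the tail
* struct req *head = STAILQ_FIRST(&pending);      // peek at the head
* STAILQ_REMOVE_HEAD(&pending, entries);          // dequeue from the head
*/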
/*
* XXX
* We #undef all of the macros because there are incompatible versions of this
* file and these macros on various systems. What makes the problem worse is
* they are included and/or defined by system include files which we may have
* already loaded into Berkeley DB before getting here. For example, FreeBSD's
* <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
* several of the LIST_XXX macros. Visual C.NET 7.0 also defines some of these
* same macros in Vc7\PlatformSDK\Include\WinNT.h. Make sure we use ours.
*/
#undef LIST_EMPTY
#undef LIST_ENTRY
#undef LIST_FIRST
#undef LIST_FOREACH
#undef LIST_HEAD
#undef LIST_HEAD_INITIALIZER
#undef LIST_INIT
#undef LIST_INSERT_AFTER
#undef LIST_INSERT_BEFORE
#undef LIST_INSERT_HEAD
#undef LIST_NEXT
#undef LIST_REMOVE
#undef QMD_TRACE_ELEM
#undef QMD_TRACE_HEAD
#undef QUEUE_MACRO_DEBUG
#undef SLIST_EMPTY
#undef SLIST_ENTRY
#undef SLIST_FIRST
#undef SLIST_FOREACH
#undef SLIST_FOREACH_PREVPTR
#undef SLIST_HEAD
#undef SLIST_HEAD_INITIALIZER
#undef SLIST_INIT
#undef SLIST_INSERT_AFTER
#undef SLIST_INSERT_HEAD
#undef SLIST_NEXT
#undef SLIST_REMOVE
#undef SLIST_REMOVE_HEAD
#undef STAILQ_CONCAT
#undef STAILQ_EMPTY
#undef STAILQ_ENTRY
#undef STAILQ_FIRST
#undef STAILQ_FOREACH
#undef STAILQ_HEAD
#undef STAILQ_HEAD_INITIALIZER
#undef STAILQ_INIT
#undef STAILQ_INSERT_AFTER
#undef STAILQ_INSERT_HEAD
#undef STAILQ_INSERT_TAIL
#undef STAILQ_LAST
#undef STAILQ_NEXT
#undef STAILQ_REMOVE
#undef STAILQ_REMOVE_HEAD
#undef STAILQ_REMOVE_HEAD_UNTIL
#undef TAILQ_CONCAT
#undef TAILQ_EMPTY
#undef TAILQ_ENTRY
#undef TAILQ_FIRST
#undef TAILQ_FOREACH
#undef TAILQ_FOREACH_REVERSE
#undef TAILQ_HEAD
#undef TAILQ_HEAD_INITIALIZER
#undef TAILQ_INIT
#undef TAILQ_INSERT_AFTER
#undef TAILQ_INSERT_BEFORE
#undef TAILQ_INSERT_HEAD
#undef TAILQ_INSERT_TAIL
#undef TAILQ_LAST
#undef TAILQ_NEXT
#undef TAILQ_PREV
#undef TAILQ_REMOVE
#undef TRACEBUF
#undef TRASHIT
#define QUEUE_MACRO_DEBUG 0
#if QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
char * lastfile;
int lastline;
char * prevfile;
int prevline;
};
#define TRACEBUF struct qm_trace trace;
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
#define QMD_TRACE_HEAD(head) do { \
(head)->trace.prevline = (head)->trace.lastline; \
(head)->trace.prevfile = (head)->trace.lastfile; \
(head)->trace.lastline = __LINE__; \
(head)->trace.lastfile = __FILE__; \
} while (0)
#define QMD_TRACE_ELEM(elem) do { \
(elem)->trace.prevline = (elem)->trace.lastline; \
(elem)->trace.prevfile = (elem)->trace.lastfile; \
(elem)->trace.lastline = __LINE__; \
(elem)->trace.lastfile = __FILE__; \
} while (0)
#else
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif /* QUEUE_MACRO_DEBUG */
/*
* Singly-linked List declarations.
*/
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List functions.
*/
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_FOREACH(var, head, field) \
for ((var) = SLIST_FIRST((head)); \
(var); \
(var) = SLIST_NEXT((var), field))
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
for ((varp) = &SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \
(varp) = &SLIST_NEXT((var), field))
#define SLIST_INIT(head) do { \
SLIST_FIRST((head)) = NULL; \
} while (0)
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
SLIST_NEXT((slistelm), field) = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
SLIST_FIRST((head)) = (elm); \
} while (0)
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_REMOVE(head, elm, type, field) do { \
if (SLIST_FIRST((head)) == (elm)) { \
SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = SLIST_FIRST((head)); \
while (curelm != NULL && \
SLIST_NEXT(curelm, field) != (elm)) \
curelm = SLIST_NEXT(curelm, field); \
if (curelm != NULL) \
SLIST_NEXT(curelm, field) = \
SLIST_NEXT(SLIST_NEXT(curelm, field), field);\
} \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
/*
* Singly-linked Tail queue declarations.
*/
#define STAILQ_HEAD(name, type) \
struct name { \
struct type *stqh_first;/* first element */ \
struct type **stqh_last;/* addr of last next element */ \
}
#define STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \
struct { \
struct type *stqe_next; /* next element */ \
}
/*
* Singly-linked Tail queue functions.
*/
#define STAILQ_CONCAT(head1, head2) do { \
if (!STAILQ_EMPTY((head2))) { \
*(head1)->stqh_last = (head2)->stqh_first; \
(head1)->stqh_last = (head2)->stqh_last; \
STAILQ_INIT((head2)); \
} \
} while (0)
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define STAILQ_FIRST(head) ((head)->stqh_first)
#define STAILQ_FOREACH(var, head, field) \
for ((var) = STAILQ_FIRST((head)); \
(var); \
(var) = STAILQ_NEXT((var), field))
#define STAILQ_INIT(head) do { \
STAILQ_FIRST((head)) = NULL; \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_NEXT((tqelm), field) = (elm); \
} while (0)
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_FIRST((head)) = (elm); \
} while (0)
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
STAILQ_NEXT((elm), field) = NULL; \
*(head)->stqh_last = (elm); \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)
#define STAILQ_LAST(head, type, field) \
(STAILQ_EMPTY((head)) ? \
NULL : \
((struct type *) \
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
#define STAILQ_REMOVE(head, elm, type, field) do { \
if (STAILQ_FIRST((head)) == (elm)) { \
STAILQ_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = STAILQ_FIRST((head)); \
while (STAILQ_NEXT(curelm, field) != (elm)) \
curelm = STAILQ_NEXT(curelm, field); \
if ((STAILQ_NEXT(curelm, field) = \
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
} \
} while (0)
#define STAILQ_REMOVE_HEAD(head, field) do { \
if ((STAILQ_FIRST((head)) = \
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
/*
* List declarations.
*/
#define LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_FOREACH(var, head, field) \
for ((var) = LIST_FIRST((head)); \
(var); \
(var) = LIST_NEXT((var), field))
#define LIST_INIT(head) do { \
LIST_FIRST((head)) = NULL; \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
LIST_NEXT((listelm), field)->field.le_prev = \
&LIST_NEXT((elm), field); \
LIST_NEXT((listelm), field) = (elm); \
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
LIST_NEXT((elm), field) = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
LIST_FIRST((head)) = (elm); \
(elm)->field.le_prev = &LIST_FIRST((head)); \
} while (0)
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_REMOVE(elm, field) do { \
if (LIST_NEXT((elm), field) != NULL) \
LIST_NEXT((elm), field)->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
} while (0)
/*
* Tail queue declarations.
*/
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
TRACEBUF \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
TRACEBUF \
}
/*
* Tail queue functions.
*/
#define TAILQ_CONCAT(head1, head2, field) do { \
if (!TAILQ_EMPTY(head2)) { \
*(head1)->tqh_last = (head2)->tqh_first; \
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
(head1)->tqh_last = (head2)->tqh_last; \
TAILQ_INIT((head2)); \
QMD_TRACE_HEAD(head1); \
QMD_TRACE_HEAD(head2); \
} \
} while (0)
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST((head)); \
(var); \
(var) = TAILQ_NEXT((var), field))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = TAILQ_LAST((head), headname); \
(var); \
(var) = TAILQ_PREV((var), headname, field))
#define TAILQ_INIT(head) do { \
TAILQ_FIRST((head)) = NULL; \
(head)->tqh_last = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
TAILQ_NEXT((elm), field)->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else { \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
} \
TAILQ_NEXT((listelm), field) = (elm); \
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
TAILQ_NEXT((elm), field) = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
TAILQ_FIRST((head))->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
TAILQ_FIRST((head)) = (elm); \
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
TAILQ_NEXT((elm), field) = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_REMOVE(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field)) != NULL) \
TAILQ_NEXT((elm), field)->field.tqe_prev = \
(elm)->field.tqe_prev; \
else { \
(head)->tqh_last = (elm)->field.tqe_prev; \
QMD_TRACE_HEAD(head); \
} \
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
TRASHIT((elm)->field.tqe_next); \
TRASHIT((elm)->field.tqe_prev); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#if defined(__cplusplus)
}
#endif
#endif /* !_DB_QUEUE_H_ */

File diff suppressed because it is too large.

src/async_nif.hrl (new file, 43 lines)

@@ -0,0 +1,43 @@
%% -------------------------------------------------------------------
%%
%% async_nif: An async thread-pool layer for Erlang's NIF API
%%
%% Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
%% Author: Gregory Burd <greg@basho.com> <greg@burd.me>
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
%% except in compliance with the License. You may obtain
%% a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing,
%% software distributed under the License is distributed on an
%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
%% KIND, either express or implied. See the License for the
%% specific language governing permissions and limitations
%% under the License.
%%
%% -------------------------------------------------------------------
-define(ASYNC_NIF_CALL(Fun, Args),
begin
NIFRef = erlang:make_ref(),
case erlang:apply(Fun, [NIFRef|Args]) of
{ok, QDepth} ->
erlang:bump_reductions(100 * QDepth),
receive
{NIFRef, {error, shutdown}=Error} ->
%% Work unit was not executed, requeue it?
Error;
{NIFRef, {error, _Reason}=Error} ->
%% Work unit returned an error.
Error;
{NIFRef, Reply} ->
Reply
end;
Other ->
Other
end
end).


@@ -61,6 +61,8 @@
fold_keys/3,
fold/3]).
-include("async_nif.hrl").
-ifdef(TEST).
-ifdef(EQC).
-include_lib("eqc/include/eqc.hrl").
@@ -101,143 +103,259 @@ init() ->
erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
-spec conn_open(string(), config()) -> {ok, connection()} | {error, term()}.
conn_open(_HomeDir, _Config) ->
conn_open(HomeDir, Config) ->
?ASYNC_NIF_CALL(fun conn_open_nif/3, [HomeDir, Config]).
-spec conn_open_nif(reference(), string(), config()) -> {ok, connection()} | {error, term()}.
conn_open_nif(_AsyncRef, _HomeDir, _Config) ->
?nif_stub.
-spec conn_close(connection()) -> ok | {error, term()}.
conn_close(_ConnRef) ->
conn_close(ConnRef) ->
?ASYNC_NIF_CALL(fun conn_close_nif/2, [ConnRef]).
-spec conn_close_nif(reference(), connection()) -> ok | {error, term()}.
conn_close_nif(_AsyncRef, _ConnRef) ->
?nif_stub.
-spec session_open(connection()) -> {ok, session()} | {error, term()}.
session_open(_ConnRef) ->
session_open(ConnRef) ->
?ASYNC_NIF_CALL(fun session_open_nif/2, [ConnRef]).
-spec session_open_nif(reference(), connection()) -> {ok, session()} | {error, term()}.
session_open_nif(_AsyncRef, _ConnRef) ->
?nif_stub.
-spec session_close(session()) -> ok | {error, term()}.
session_close(_Ref) ->
session_close(Ref) ->
?ASYNC_NIF_CALL(fun session_close_nif/2, [Ref]).
-spec session_close_nif(reference(), session()) -> ok | {error, term()}.
session_close_nif(_AsyncRef, _Ref) ->
?nif_stub.
-spec session_create(session(), string()) -> ok | {error, term()}.
-spec session_create(session(), string(), config()) -> ok | {error, term()}.
session_create(Ref, Name) ->
session_create(Ref, Name, ?EMPTY_CONFIG).
session_create(_Ref, _Name, _Config) ->
session_create(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun session_create_nif/4, [Ref, Name, Config]).
-spec session_create_nif(reference(), session(), string(), config()) -> ok | {error, term()}.
session_create_nif(_AsyncNif, _Ref, _Name, _Config) ->
?nif_stub.
-spec session_drop(session(), string()) -> ok | {error, term()}.
-spec session_drop(session(), string(), config()) -> ok | {error, term()}.
session_drop(Ref, Name) ->
session_drop(Ref, Name, ?EMPTY_CONFIG).
session_drop(_Ref, _Name, _Config) ->
session_drop(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun session_drop_nif/4, [Ref, Name, Config]).
-spec session_drop_nif(reference(), session(), string(), config()) -> ok | {error, term()}.
session_drop_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec session_delete(session(), string(), key()) -> ok | {error, term()}.
session_delete(_Ref, _Table, _Key) ->
session_delete(Ref, Table, Key) ->
?ASYNC_NIF_CALL(fun session_delete_nif/4, [Ref, Table, Key]).
-spec session_delete_nif(reference(), session(), string(), key()) -> ok | {error, term()}.
session_delete_nif(_AsyncRef, _Ref, _Table, _Key) ->
?nif_stub.
-spec session_get(session(), string(), key()) -> {ok, value()} | not_found | {error, term()}.
session_get(_Ref, _Table, _Key) ->
session_get(Ref, Table, Key) ->
?ASYNC_NIF_CALL(fun session_get_nif/4, [Ref, Table, Key]).
-spec session_get_nif(reference(), session(), string(), key()) -> {ok, value()} | not_found | {error, term()}.
session_get_nif(_AsyncRef, _Ref, _Table, _Key) ->
?nif_stub.
-spec session_put(session(), string(), key(), value()) -> ok | {error, term()}.
session_put(_Ref, _Table, _Key, _Value) ->
session_put(Ref, Table, Key, Value) ->
?ASYNC_NIF_CALL(fun session_put_nif/5, [Ref, Table, Key, Value]).
-spec session_put_nif(reference(), session(), string(), key(), value()) -> ok | {error, term()}.
session_put_nif(_AsyncRef, _Ref, _Table, _Key, _Value) ->
?nif_stub.
-spec session_rename(session(), string(), string()) -> ok | {error, term()}.
-spec session_rename(session(), string(), string(), config()) -> ok | {error, term()}.
session_rename(Ref, OldName, NewName) ->
session_rename(Ref, OldName, NewName, ?EMPTY_CONFIG).
session_rename(_Ref, _OldName, _NewName, _Config) ->
session_rename(Ref, OldName, NewName, Config) ->
?ASYNC_NIF_CALL(fun session_rename_nif/5, [Ref, OldName, NewName, Config]).
-spec session_rename_nif(reference(), session(), string(), string(), config()) -> ok | {error, term()}.
session_rename_nif(_AsyncRef, _Ref, _OldName, _NewName, _Config) ->
?nif_stub.
-spec session_salvage(session(), string()) -> ok | {error, term()}.
-spec session_salvage(session(), string(), config()) -> ok | {error, term()}.
session_salvage(Ref, Name) ->
session_salvage(Ref, Name, ?EMPTY_CONFIG).
session_salvage(_Ref, _Name, _Config) ->
session_salvage(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun session_salvage_nif/4, [Ref, Name, Config]).
-spec session_salvage_nif(reference(), session(), string(), config()) -> ok | {error, term()}.
session_salvage_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec session_checkpoint(session()) -> ok | {error, term()}.
-spec session_checkpoint(session(), config()) -> ok | {error, term()}.
session_checkpoint(_Ref) ->
session_checkpoint(_Ref, ?EMPTY_CONFIG).
session_checkpoint(_Ref, _Config) ->
session_checkpoint(Ref, Config) ->
?ASYNC_NIF_CALL(fun session_checkpoint_nif/3, [Ref, Config]).
-spec session_checkpoint_nif(reference(), session(), config()) -> ok | {error, term()}.
session_checkpoint_nif(_AsyncRef, _Ref, _Config) ->
?nif_stub.
-spec session_truncate(session(), string()) -> ok | {error, term()}.
-spec session_truncate(session(), string(), config()) -> ok | {error, term()}.
session_truncate(Ref, Name) ->
session_truncate(Ref, Name, ?EMPTY_CONFIG).
session_truncate(_Ref, _Name, _Config) ->
session_truncate(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun session_truncate_nif/4, [Ref, Name, Config]).
-spec session_truncate_nif(reference(), session(), string(), config()) -> ok | {error, term()}.
session_truncate_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec session_upgrade(session(), string()) -> ok | {error, term()}.
-spec session_upgrade(session(), string(), config()) -> ok | {error, term()}.
session_upgrade(Ref, Name) ->
session_upgrade(Ref, Name, ?EMPTY_CONFIG).
session_upgrade(_Ref, _Name, _Config) ->
session_upgrade(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun session_upgrade_nif/4, [Ref, Name, Config]).
-spec session_upgrade_nif(reference(), session(), string(), config()) -> ok | {error, term()}.
session_upgrade_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec session_verify(session(), string()) -> ok | {error, term()}.
-spec session_verify(session(), string(), config()) -> ok | {error, term()}.
session_verify(Ref, Name) ->
session_verify(Ref, Name, ?EMPTY_CONFIG).
session_verify(_Ref, _Name, _Config) ->
session_verify(Ref, Name, Config) ->
?ASYNC_NIF_CALL(fun session_verify_nif/4, [Ref, Name, Config]).
-spec session_verify_nif(reference(), session(), string(), config()) -> ok | {error, term()}.
session_verify_nif(_AsyncRef, _Ref, _Name, _Config) ->
?nif_stub.
-spec cursor_open(session(), string()) -> {ok, cursor()} | {error, term()}.
cursor_open(_Ref, _Table) ->
cursor_open(Ref, Table) ->
?ASYNC_NIF_CALL(fun cursor_open_nif/3, [Ref, Table]).
-spec cursor_open_nif(reference(), session(), string()) -> {ok, cursor()} | {error, term()}.
cursor_open_nif(_AsyncRef, _Ref, _Table) ->
?nif_stub.
-spec cursor_close(cursor()) -> ok | {error, term()}.
cursor_close(_Cursor) ->
cursor_close(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_close_nif/2, [Cursor]).
-spec cursor_close_nif(reference(), cursor()) -> ok | {error, term()}.
cursor_close_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_next(cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_next(_Cursor) ->
cursor_next(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_next_nif/2, [Cursor]).
-spec cursor_next_nif(reference(), cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_next_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_next_key(cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_next_key(_Cursor) ->
cursor_next_key(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_next_key_nif/2, [Cursor]).
-spec cursor_next_key_nif(reference(), cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_next_key_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_next_value(cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_next_value(_Cursor) ->
cursor_next_value(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_next_value_nif/2, [Cursor]).
-spec cursor_next_value_nif(reference(), cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_next_value_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_prev(cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_prev(_Cursor) ->
cursor_prev(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_prev_nif/2, [Cursor]).
-spec cursor_prev_nif(reference(), cursor()) -> {ok, key(), value()} | not_found | {error, term()}.
cursor_prev_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_prev_key(cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_prev_key(_Cursor) ->
cursor_prev_key(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_prev_key_nif/2, [Cursor]).
-spec cursor_prev_key_nif(reference(), cursor()) -> {ok, key()} | not_found | {error, term()}.
cursor_prev_key_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_prev_value(cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_prev_value(_Cursor) ->
cursor_prev_value(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_prev_value_nif/2, [Cursor]).
-spec cursor_prev_value_nif(reference(), cursor()) -> {ok, value()} | not_found | {error, term()}.
cursor_prev_value_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_search(cursor(), key()) -> {ok, value()} | {error, term()}.
cursor_search(_Cursor, _Key) ->
cursor_search(Cursor, Key) ->
?ASYNC_NIF_CALL(fun cursor_search_nif/3, [Cursor, Key]).
-spec cursor_search_nif(reference(), cursor(), key()) -> {ok, value()} | {error, term()}.
cursor_search_nif(_AsyncRef, _Cursor, _Key) ->
?nif_stub.
-spec cursor_search_near(cursor(), key()) -> {ok, value()} | {error, term()}.
cursor_search_near(_Cursor, _Key) ->
cursor_search_near(Cursor, Key) ->
?ASYNC_NIF_CALL(fun cursor_search_near_nif/3, [Cursor, Key]).
-spec cursor_search_near_nif(reference(), cursor(), key()) -> {ok, value()} | {error, term()}.
cursor_search_near_nif(_AsyncRef, _Cursor, _Key) ->
?nif_stub.
-spec cursor_reset(cursor()) -> ok | {error, term()}.
cursor_reset(_Cursor) ->
cursor_reset(Cursor) ->
?ASYNC_NIF_CALL(fun cursor_reset_nif/2, [Cursor]).
-spec cursor_reset_nif(reference(), cursor()) -> ok | {error, term()}.
cursor_reset_nif(_AsyncRef, _Cursor) ->
?nif_stub.
-spec cursor_insert(cursor(), key(), value()) -> ok | {error, term()}.
cursor_insert(_Cursor, _Key, _Value) ->
cursor_insert(Cursor, Key, Value) ->
?ASYNC_NIF_CALL(fun cursor_insert_nif/4, [Cursor, Key, Value]).
-spec cursor_insert_nif(reference(), cursor(), key(), value()) -> ok | {error, term()}.
cursor_insert_nif(_AsyncRef, _Cursor, _Key, _Value) ->
?nif_stub.
-spec cursor_update(cursor(), key(), value()) -> ok | {error, term()}.
cursor_update(_Cursor, _Key, _Value) ->
cursor_update(Cursor, Key, Value) ->
?ASYNC_NIF_CALL(fun cursor_update_nif/4, [Cursor, Key, Value]).
-spec cursor_update_nif(reference(), cursor(), key(), value()) -> ok | {error, term()}.
cursor_update_nif(_AsyncRef, _Cursor, _Key, _Value) ->
?nif_stub.
-spec cursor_remove(cursor(), key(), value()) -> ok | {error, term()}.
cursor_remove(_Cursor, _Key, _Value) ->
cursor_remove(Cursor, Key, Value) ->
?ASYNC_NIF_CALL(fun cursor_remove_nif/4, [Cursor, Key, Value]).
-spec cursor_remove_nif(reference(), cursor(), key(), value()) -> ok | {error, term()}.
cursor_remove_nif(_AsyncRef, _Cursor, _Key, _Value) ->
?nif_stub.
-type fold_keys_fun() :: fun((Key::binary(), any()) -> any()).