Use async NIFs technique to avoid blocking the BEAM's schedulers #1

Closed
gburd wants to merge 15 commits from gsb-async-nifs into master
5 changed files with 616 additions and 535 deletions
Showing only changes of commit c0231923f9


@@ -52,6 +52,9 @@ repl:
gdb-repl:
USE_GDB=1 $(ERL) -pz deps/*/ebin -pa ebin
eunit-repl:
erl -pa .eunit -pz deps/*/ebin -pz ebin -exec 'cd(".eunit").'
# NOTES
#


@@ -34,151 +34,116 @@ extern "C" {
system you must do the following before launching the process.
echo 1000000 > /proc/sys/kernel/threads-max
and for all UNIX systems there will be ulimit maximums. */
#ifndef ANIF_MAX_WORKERS
#define ANIF_MAX_WORKERS 64
#ifndef ASYNC_NIF_MAX_WORKERS
#define ASYNC_NIF_MAX_WORKERS 64
#endif
#ifndef __offsetof
#define __offsetof(st, m) \
((size_t) ( (char *)&((st *)0)->m - (char *)0 ))
#endif
#include "queue.h"
struct anif_req_entry {
ERL_NIF_TERM pid;
ErlNifEnv *env;
struct async_nif_req_entry {
ERL_NIF_TERM ref, *argv;
ErlNifEnv *env;
ErlNifPid pid;
void *args;
void *priv_data;
void (*fn_work)(ErlNifEnv*, ERL_NIF_TERM, void *, ErlNifPid*, void *);
void (*fn_post)(void *);
STAILQ_ENTRY(anif_req_entry) entries;
STAILQ_ENTRY(async_nif_req_entry) entries;
};
STAILQ_HEAD(reqs, anif_req_entry) anif_reqs = STAILQ_HEAD_INITIALIZER(anif_reqs);
STAILQ_HEAD(reqs, async_nif_req_entry) async_nif_reqs = STAILQ_HEAD_INITIALIZER(async_nif_reqs);
struct anif_worker_entry {
struct async_nif_worker_entry {
ErlNifTid tid;
LIST_ENTRY(anif_worker_entry) entries;
LIST_ENTRY(async_nif_worker_entry) entries;
};
LIST_HEAD(idle_workers, anif_worker_entry) anif_idle_workers = LIST_HEAD_INITIALIZER(anif_worker);
LIST_HEAD(idle_workers, async_nif_worker_entry) async_nif_idle_workers = LIST_HEAD_INITIALIZER(async_nif_worker);
static volatile unsigned int async_nif_req_count = 0;
static volatile unsigned int async_nif_shutdown = 0;
static ErlNifMutex *async_nif_req_mutex = NULL;
static ErlNifMutex *async_nif_worker_mutex = NULL;
static ErlNifCond *async_nif_cnd = NULL;
static struct async_nif_worker_entry async_nif_worker_entries[ASYNC_NIF_MAX_WORKERS];
static volatile unsigned int anif_req_count = 0;
static volatile unsigned int anif_shutdown = 0;
static ErlNifMutex *anif_req_mutex = NULL;
static ErlNifMutex *anif_worker_mutex = NULL;
static ErlNifCond *anif_cnd = NULL;
static struct anif_worker_entry anif_worker_entries[ANIF_MAX_WORKERS];
#define ASYNC_NIF_DECL(decl, frame, pre_block, work_block, post_block) \
struct decl ## _args frame; \
static void fn_work_ ## decl (ErlNifEnv *env, ERL_NIF_TERM ref, void *priv_data, ErlNifPid *pid, struct decl ## _args *args) \
work_block \
static void fn_work_ ## decl (ErlNifEnv *env, ERL_NIF_TERM ref, void *priv_data, ErlNifPid *pid, struct decl ## _args *args) work_block \
static void fn_post_ ## decl (struct decl ## _args *args) { \
do post_block while(0); \
} \
static ERL_NIF_TERM decl(ErlNifEnv* env_in, int argc_in, const ERL_NIF_TERM argv_in[]) { \
static ERL_NIF_TERM decl(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv_in[]) { \
struct decl ## _args on_stack_args; \
struct decl ## _args *args = &on_stack_args; \
struct decl ## _args *copy_of_args; \
struct anif_req_entry *req = NULL; \
ErlNifPid pid_in; \
ErlNifEnv *env = NULL; \
ERL_NIF_TERM ref; \
ERL_NIF_TERM *argv = NULL; \
int __i = 0, argc = argc_in - 1; \
enif_self(env_in, &pid_in); \
if (anif_shutdown) { \
enif_send(NULL, &pid_in, env_in, \
enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "shutdown"))); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "shutdown")); \
struct async_nif_req_entry *req = NULL; \
ErlNifEnv *new_env = NULL; \
/* argv[0] is a ref used for selective recv */ \
const ERL_NIF_TERM *argv = argv_in + 1; \
argc--; \
if (async_nif_shutdown) \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "shutdown")); \
if (!(new_env = enif_alloc_env())) { \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "enomem")); \
} \
env = enif_alloc_env(); \
if (!env) \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
ref = enif_make_copy(env, argv_in[0]); \
if (!ref) { \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
do pre_block while(0); \
req = (struct async_nif_req_entry*)enif_alloc(sizeof(struct async_nif_req_entry)); \
if (!req) { \
fn_post_ ## decl (args); \
enif_free_env(new_env); \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "enomem")); \
} \
copy_of_args = (struct decl ## _args *)enif_alloc(sizeof(struct decl ## _args)); \
if (!copy_of_args) { \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
fn_post_ ## decl (args); \
enif_free_env(new_env); \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "enomem")); \
} \
argv = (ERL_NIF_TERM *)enif_alloc((sizeof(ERL_NIF_TERM) * argc)); \
if (!argv) { \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
req = (struct anif_req_entry*)enif_alloc(sizeof(struct anif_req_entry)); \
if (!req) { \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
for (__i = 0; __i < argc; __i++) { \
argv[__i] = enif_make_copy(env, argv_in[(__i + 1)]); \
if (!argv[__i]) { \
enif_free(req); \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
} \
do pre_block while(0); \
memcpy(copy_of_args, args, sizeof(struct decl ## _args)); \
req->ref = ref; \
req->pid = enif_make_pid(env, &pid_in); \
req->env = new_env; \
req->ref = enif_make_copy(new_env, argv_in[0]); \
enif_self(env, &req->pid); \
req->args = (void*)copy_of_args; \
req->argv = argv; \
req->env = env; \
req->priv_data = enif_priv_data(env_in); \
req->priv_data = enif_priv_data(env); \
req->fn_work = (void (*)(ErlNifEnv *, ERL_NIF_TERM, void*, ErlNifPid*, void *))fn_work_ ## decl ; \
req->fn_post = (void (*)(void *))fn_post_ ## decl; \
anif_enqueue_req(req); \
async_nif_enqueue_req(req); \
return enif_make_tuple2(env, enif_make_atom(env, "ok"), \
enif_make_int(env, anif_req_count)); \
enif_make_tuple2(env, enif_make_atom(env, "enqueued"), \
enif_make_int(env, async_nif_req_count))); \
}
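
To make the macro's shape concrete, here is a minimal sketch of an async NIF declared with ASYNC_NIF_DECL. The my_bin_size name and its single binary argument are hypothetical (only the macros come from this header); note that the Erlang wrapper passes a fresh ref as argv_in[0], so the shifted argv seen by the pre-block starts at the first real argument.

ASYNC_NIF_DECL(
  my_bin_size,                  /* hypothetical NIF; Erlang arity 2: ref + binary */
  { ErlNifBinary bin; },        /* the _args frame that is memcpy'd for the worker */
  { /* pre: runs on the scheduler thread; validate and copy terms */
    if (!enif_inspect_binary(env, argv[0], &args->bin)) {
      ASYNC_NIF_RETURN_BADARG();
    }
    enif_make_copy(ASYNC_NIF_WORK_ENV, argv[0]); /* keep the term alive for the worker */
  },
  { /* work: runs on a pool thread and may block; the reply is sent as {Ref, Msg} */
    ASYNC_NIF_REPLY(enif_make_tuple2(env, enif_make_atom(env, "ok"),
                                     enif_make_uint64(env, args->bin.size)));
  },
  { /* post: cleanup hook; also invoked when the queue is drained at unload */
  });

The caller immediately gets {ok, {enqueued, QDepth}} back and later receives {Ref, Reply}; the .hrl change at the end of this diff shows the matching selective receive.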
#define ASYNC_NIF_LOAD() if (anif_init() != 0) return -1;
#define ASYNC_NIF_UNLOAD() anif_unload();
#define ASYNC_NIF_UPGRADE() anif_unload();
#define ASYNC_NIF_LOAD() if (async_nif_init() != 0) return -1;
#define ASYNC_NIF_UNLOAD() async_nif_unload();
#define ASYNC_NIF_UPGRADE() async_nif_unload();
#define ASYNC_NIF_PRE_ENV() env_in
#define ASYNC_NIF_PRE_RETURN_CLEANUP() \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free(req); \
enif_free_env(env);
#define ASYNC_NIF_RETURN_BADARG() ASYNC_NIF_PRE_RETURN_CLEANUP(); return enif_make_badarg(env_in);
#define ASYNC_NIF_RETURN_BADARG() return enif_make_badarg(env);
#define ASYNC_NIF_WORK_ENV new_env
#ifndef PULSE_FORCE_USING_PULSE_SEND_HERE
#define ASYNC_NIF_REPLY(msg) enif_send(NULL, pid, env, enif_make_tuple2(env, ref, msg))
#else
#define ASYNC_NIF_REPLY(msg) PULSE_SEND(NULL, pid, env, enif_make_tuple2(env, ref, msg))
#endif
static void anif_enqueue_req(struct anif_req_entry *r)
static void async_nif_enqueue_req(struct async_nif_req_entry *r)
{
/* Add the request to the work queue. */
enif_mutex_lock(anif_req_mutex);
STAILQ_INSERT_TAIL(&anif_reqs, r, entries);
anif_req_count++;
enif_mutex_unlock(anif_req_mutex);
enif_cond_broadcast(anif_cnd);
enif_mutex_lock(async_nif_req_mutex);
STAILQ_INSERT_TAIL(&async_nif_reqs, r, entries);
async_nif_req_count++;
enif_mutex_unlock(async_nif_req_mutex);
enif_cond_broadcast(async_nif_cnd);
}
static void *anif_worker_fn(void *arg)
static void *async_nif_worker_fn(void *arg)
{
struct anif_worker_entry *worker = (struct anif_worker_entry *)arg;
struct anif_req_entry *req = NULL;
struct async_nif_worker_entry *worker = (struct async_nif_worker_entry *)arg;
struct async_nif_req_entry *req = NULL;
/*
* Workers are active while there is work on the queue to do and
@@ -186,39 +151,36 @@ static void *anif_worker_fn(void *arg)
*/
for(;;) {
/* Examine the request queue, are there things to be done? */
enif_mutex_lock(anif_req_mutex);
enif_mutex_lock(anif_worker_mutex);
LIST_INSERT_HEAD(&anif_idle_workers, worker, entries);
enif_mutex_unlock(anif_worker_mutex);
enif_mutex_lock(async_nif_req_mutex);
enif_mutex_lock(async_nif_worker_mutex);
LIST_INSERT_HEAD(&async_nif_idle_workers, worker, entries);
enif_mutex_unlock(async_nif_worker_mutex);
check_again_for_work:
if (anif_shutdown) { enif_mutex_unlock(anif_req_mutex); break; }
if ((req = STAILQ_FIRST(&anif_reqs)) == NULL) {
if (async_nif_shutdown) { enif_mutex_unlock(async_nif_req_mutex); break; }
if ((req = STAILQ_FIRST(&async_nif_reqs)) == NULL) {
/* Queue is empty, join the list of idle workers and wait for work */
enif_cond_wait(anif_cnd, anif_req_mutex);
enif_cond_wait(async_nif_cnd, async_nif_req_mutex);
goto check_again_for_work;
} else {
/* `req` is our work request and we hold the lock. */
enif_cond_broadcast(anif_cnd);
enif_cond_broadcast(async_nif_cnd);
/* Take the request off the queue. */
STAILQ_REMOVE(&anif_reqs, req, anif_req_entry, entries); anif_req_count--;
STAILQ_REMOVE(&async_nif_reqs, req, async_nif_req_entry, entries); async_nif_req_count--;
/* Now we need to remove this thread from the list of idle threads. */
enif_mutex_lock(anif_worker_mutex);
enif_mutex_lock(async_nif_worker_mutex);
LIST_REMOVE(worker, entries);
/* Release the locks in the reverse of the order in which we acquired
them, so as not to self-deadlock. */
enif_mutex_unlock(anif_worker_mutex);
enif_mutex_unlock(anif_req_mutex);
enif_mutex_unlock(async_nif_worker_mutex);
enif_mutex_unlock(async_nif_req_mutex);
/* Finally, let's do the work! :) */
ErlNifPid pid;
enif_get_local_pid(req->env, req->pid, &pid);
req->fn_work(req->env, req->ref, req->priv_data, &pid, req->args);
req->fn_work(req->env, req->ref, req->priv_data, &req->pid, req->args);
req->fn_post(req->args);
enif_free(req->args);
enif_free(req->argv);
enif_free_env(req->env);
enif_free(req);
}
@@ -227,74 +189,89 @@ static void *anif_worker_fn(void *arg)
return 0;
}
static void anif_unload(void)
static void async_nif_unload(void)
{
unsigned int i;
/* Signal the worker threads to stop what they're doing and exit. */
enif_mutex_lock(anif_req_mutex);
anif_shutdown = 1;
enif_cond_broadcast(anif_cnd);
enif_mutex_unlock(anif_req_mutex);
enif_mutex_lock(async_nif_req_mutex);
async_nif_shutdown = 1;
enif_cond_broadcast(async_nif_cnd);
enif_mutex_unlock(async_nif_req_mutex);
/* Join the now-exiting worker threads. */
for (i = 0; i < ANIF_MAX_WORKERS; ++i) {
for (i = 0; i < ASYNC_NIF_MAX_WORKERS; ++i) {
void *exit_value = 0; /* Ignore this. */
enif_thread_join(anif_worker_entries[i].tid, &exit_value);
enif_thread_join(async_nif_worker_entries[i].tid, &exit_value);
}
/* We won't get here until all threads have exited.
Patch things up, and carry on. */
enif_mutex_lock(anif_req_mutex);
enif_mutex_lock(async_nif_req_mutex);
/* Worker threads are stopped; now toss anything left in the queue. */
struct anif_req_entry *req = NULL;
STAILQ_FOREACH(req, &anif_reqs, entries) {
STAILQ_REMOVE(&anif_reqs, STAILQ_LAST(&anif_reqs, anif_req_entry, entries),
anif_req_entry, entries);
ErlNifPid pid;
enif_get_local_pid(req->env, req->pid, &pid);
enif_send(NULL, &pid, req->env,
struct async_nif_req_entry *req = NULL;
STAILQ_FOREACH(req, &async_nif_reqs, entries) {
STAILQ_REMOVE(&async_nif_reqs, STAILQ_LAST(&async_nif_reqs, async_nif_req_entry, entries),
async_nif_req_entry, entries);
#ifdef PULSE
PULSE_SEND(NULL, &req->pid, req->env,
enif_make_tuple2(req->env, enif_make_atom(req->env, "error"),
enif_make_atom(req->env, "shutdown")));
#else
enif_send(NULL, &req->pid, req->env,
enif_make_tuple2(req->env, enif_make_atom(req->env, "error"),
enif_make_atom(req->env, "shutdown")));
#endif
req->fn_post(req->args);
enif_free(req->args);
enif_free(req->argv);
enif_free_env(req->env);
enif_free(req);
anif_req_count--;
async_nif_req_count--;
}
enif_mutex_unlock(anif_req_mutex);
enif_mutex_unlock(async_nif_req_mutex);
enif_cond_destroy(anif_cnd);
/* Not strictly necessary. */
memset(anif_worker_entries, 0, sizeof(struct anif_worker_entry) * ANIF_MAX_WORKERS);
enif_mutex_destroy(anif_req_mutex); anif_req_mutex = NULL;
enif_mutex_destroy(anif_worker_mutex); anif_worker_mutex = NULL;
memset(async_nif_worker_entries, 0, sizeof(struct async_nif_worker_entry) * ASYNC_NIF_MAX_WORKERS);
enif_cond_destroy(async_nif_cnd); async_nif_cnd = NULL;
enif_mutex_destroy(async_nif_req_mutex); async_nif_req_mutex = NULL;
enif_mutex_destroy(async_nif_worker_mutex); async_nif_worker_mutex = NULL;
}
static int anif_init(void)
static int async_nif_init(void)
{
unsigned int i;
int i;
/* Don't init more than once. */
if (anif_req_mutex) return 0;
if (async_nif_req_mutex) return 0;
anif_req_mutex = enif_mutex_create(NULL);
anif_worker_mutex = enif_mutex_create(NULL);
anif_cnd = enif_cond_create(NULL);
async_nif_req_mutex = enif_mutex_create(NULL);
async_nif_worker_mutex = enif_mutex_create(NULL);
async_nif_cnd = enif_cond_create(NULL);
/* Setup the requests management. */
anif_req_count = 0;
async_nif_req_count = 0;
/* Setup the thread pool management. */
enif_mutex_lock(anif_worker_mutex);
for (i = 0; i < ANIF_MAX_WORKERS; i++) {
enif_thread_create(NULL, &anif_worker_entries[i].tid,
&anif_worker_fn, (void*)&anif_worker_entries[i], NULL);
enif_mutex_lock(async_nif_worker_mutex);
memset(async_nif_worker_entries, 0, sizeof(struct async_nif_worker_entry) * ASYNC_NIF_MAX_WORKERS);
for (i = 0; i < ASYNC_NIF_MAX_WORKERS; i++) {
if (enif_thread_create(NULL, &async_nif_worker_entries[i].tid,
&async_nif_worker_fn, (void*)&async_nif_worker_entries[i], NULL) != 0) {
async_nif_shutdown = 1;
enif_cond_broadcast(async_nif_cnd);
enif_mutex_unlock(async_nif_worker_mutex);
while(i-- > 0) {
void *exit_value = 0; /* Ignore this. */
enif_thread_join(async_nif_worker_entries[i].tid, &exit_value);
}
enif_mutex_unlock(anif_worker_mutex);
memset(async_nif_worker_entries, 0, sizeof(struct async_nif_worker_entry) * ASYNC_NIF_MAX_WORKERS);
enif_cond_destroy(async_nif_cnd); async_nif_cnd = NULL;
enif_mutex_destroy(async_nif_req_mutex); async_nif_req_mutex = NULL;
enif_mutex_destroy(async_nif_worker_mutex); async_nif_worker_mutex = NULL;
return -1;
}
}
enif_mutex_unlock(async_nif_worker_mutex);
return 0;
}
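
For completeness, a minimal sketch (module and callback names are illustrative, not from this PR) of wiring the three lifecycle macros into the standard NIF callbacks:

static int on_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
{
    ASYNC_NIF_LOAD();   /* spins up the worker pool; returns -1 from on_load on failure */
    return 0;
}

static void on_unload(ErlNifEnv *env, void *priv_data)
{
    ASYNC_NIF_UNLOAD(); /* signals shutdown, joins the workers, drains the queue */
}

static int on_upgrade(ErlNifEnv *env, void **priv_data, void **old_priv_data,
                      ERL_NIF_TERM load_info)
{
    ASYNC_NIF_UPGRADE(); /* currently tears down the pool just like unload */
    return 0;
}

static ErlNifFunc funcs[] = {
    {"my_bin_size", 2, my_bin_size}  /* the hypothetical async NIF sketched above */
};
ERL_NIF_INIT(my_module, funcs, on_load, NULL, on_upgrade, on_unload)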


@@ -1,9 +1,4 @@
/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -15,10 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
@@ -36,16 +27,25 @@
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
* $FreeBSD: src/sys/sys/queue.h,v 1.54 2002/08/05 05:18:43 alfred Exp $
* $FreeBSD: src/sys/sys/queue.h,v 1.75.2.3 2012/11/17 11:37:26 svnexp Exp $
*/
#ifndef _DB_QUEUE_H_
#define _DB_QUEUE_H_
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
#if defined(__cplusplus)
extern "C" {
#ifndef __offsetof
#define __offsetof(st, m) \
((size_t) ( (char *)&((st *)0)->m - (char *)0 ))
#endif
#ifndef __containerof
#define __containerof(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - __offsetof(type,member) );})
#endif
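
__containerof recovers a pointer to an enclosing structure from a pointer to one of its members, much like the Linux kernel's container_of; it relies on GNU typeof and statement expressions. A tiny illustration with a made-up struct:

struct pair { int first; int second; };

static struct pair *pair_from_second(int *sp)
{
    /* given a pointer to .second, step back to the enclosing struct pair */
    return __containerof(sp, struct pair, second);
}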
#include <sys/cdefs.h>
/*
* This file defines four types of data structures: singly-linked lists,
* singly-linked tail queues, lists and tail queues.
@@ -76,7 +76,7 @@ extern "C" {
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
* may be traversed in either direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
@@ -96,96 +96,24 @@ extern "C" {
* _EMPTY + + + +
* _FIRST + + + +
* _NEXT + + + +
* _PREV - - - +
* _PREV - + - +
* _LAST - - + +
* _FOREACH + + + +
* _FOREACH_SAFE + + + +
* _FOREACH_REVERSE - - - +
* _FOREACH_REVERSE_SAFE - - - +
* _INSERT_HEAD + + + +
* _INSERT_BEFORE - + - +
* _INSERT_AFTER + + + +
* _INSERT_TAIL - - + +
* _CONCAT - - + +
* _REMOVE_AFTER + - + -
* _REMOVE_HEAD + - + -
* _REMOVE + + + +
* _SWAP + + + +
*
*/
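
As a self-contained illustration of the singly-linked tail queue macros that async_nif.h builds its request queue on (the job struct and queue name are made up for the example):

#include <stdio.h>
#include "queue.h"

struct job {
    int id;
    STAILQ_ENTRY(job) entries;           /* link embedded in the element itself */
};
STAILQ_HEAD(job_queue, job);

int main(void)
{
    struct job_queue q = STAILQ_HEAD_INITIALIZER(q);
    struct job a = { 1, { NULL } }, b = { 2, { NULL } };

    STAILQ_INSERT_TAIL(&q, &a, entries); /* producer side: O(1) enqueue at the tail */
    STAILQ_INSERT_TAIL(&q, &b, entries);

    struct job *j;
    while ((j = STAILQ_FIRST(&q)) != NULL) { /* consumer side: pop from the head */
        STAILQ_REMOVE_HEAD(&q, entries);
        printf("job %d\n", j->id);
    }
    return 0;
}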
/*
* XXX
* We #undef all of the macros because there are incompatible versions of this
* file and these macros on various systems. What makes the problem worse is
* they are included and/or defined by system include files which we may have
* already loaded into Berkeley DB before getting here. For example, FreeBSD's
* <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
* several of the LIST_XXX macros. Visual C.NET 7.0 also defines some of these
* same macros in Vc7\PlatformSDK\Include\WinNT.h. Make sure we use ours.
*/
#undef LIST_EMPTY
#undef LIST_ENTRY
#undef LIST_FIRST
#undef LIST_FOREACH
#undef LIST_HEAD
#undef LIST_HEAD_INITIALIZER
#undef LIST_INIT
#undef LIST_INSERT_AFTER
#undef LIST_INSERT_BEFORE
#undef LIST_INSERT_HEAD
#undef LIST_NEXT
#undef LIST_REMOVE
#undef QMD_TRACE_ELEM
#undef QMD_TRACE_HEAD
#undef QUEUE_MACRO_DEBUG
#undef SLIST_EMPTY
#undef SLIST_ENTRY
#undef SLIST_FIRST
#undef SLIST_FOREACH
#undef SLIST_FOREACH_PREVPTR
#undef SLIST_HEAD
#undef SLIST_HEAD_INITIALIZER
#undef SLIST_INIT
#undef SLIST_INSERT_AFTER
#undef SLIST_INSERT_HEAD
#undef SLIST_NEXT
#undef SLIST_REMOVE
#undef SLIST_REMOVE_HEAD
#undef STAILQ_CONCAT
#undef STAILQ_EMPTY
#undef STAILQ_ENTRY
#undef STAILQ_FIRST
#undef STAILQ_FOREACH
#undef STAILQ_HEAD
#undef STAILQ_HEAD_INITIALIZER
#undef STAILQ_INIT
#undef STAILQ_INSERT_AFTER
#undef STAILQ_INSERT_HEAD
#undef STAILQ_INSERT_TAIL
#undef STAILQ_LAST
#undef STAILQ_NEXT
#undef STAILQ_REMOVE
#undef STAILQ_REMOVE_HEAD
#undef STAILQ_REMOVE_HEAD_UNTIL
#undef TAILQ_CONCAT
#undef TAILQ_EMPTY
#undef TAILQ_ENTRY
#undef TAILQ_FIRST
#undef TAILQ_FOREACH
#undef TAILQ_FOREACH_REVERSE
#undef TAILQ_HEAD
#undef TAILQ_HEAD_INITIALIZER
#undef TAILQ_INIT
#undef TAILQ_INSERT_AFTER
#undef TAILQ_INSERT_BEFORE
#undef TAILQ_INSERT_HEAD
#undef TAILQ_INSERT_TAIL
#undef TAILQ_LAST
#undef TAILQ_NEXT
#undef TAILQ_PREV
#undef TAILQ_REMOVE
#undef TRACEBUF
#undef TRASHIT
#define QUEUE_MACRO_DEBUG 0
#if QUEUE_MACRO_DEBUG
#ifdef QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
char * lastfile;
@@ -196,6 +124,7 @@ struct qm_trace {
#define TRACEBUF struct qm_trace trace;
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
#define QMD_SAVELINK(name, link) void **name = (void *)&(link)
#define QMD_TRACE_HEAD(head) do { \
(head)->trace.prevline = (head)->trace.lastline; \
@@ -214,6 +143,7 @@ struct qm_trace {
#else
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define QMD_SAVELINK(name, link)
#define TRACEBUF
#define TRASHIT(x)
#endif /* QUEUE_MACRO_DEBUG */
@@ -246,6 +176,11 @@ struct { \
(var); \
(var) = SLIST_NEXT((var), field))
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = SLIST_FIRST((head)); \
(var) && ((tvar) = SLIST_NEXT((var), field), 1); \
(var) = (tvar))
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
for ((varp) = &SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \
@@ -268,24 +203,34 @@ struct { \
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_REMOVE(head, elm, type, field) do { \
QMD_SAVELINK(oldnext, (elm)->field.sle_next); \
if (SLIST_FIRST((head)) == (elm)) { \
SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = SLIST_FIRST((head)); \
while (curelm != NULL && \
SLIST_NEXT(curelm, field) != (elm)) \
while (SLIST_NEXT(curelm, field) != (elm)) \
curelm = SLIST_NEXT(curelm, field); \
if (curelm != NULL) \
SLIST_NEXT(curelm, field) = \
SLIST_NEXT(SLIST_NEXT(curelm, field), field);\
SLIST_REMOVE_AFTER(curelm, field); \
} \
TRASHIT(*oldnext); \
} while (0)
#define SLIST_REMOVE_AFTER(elm, field) do { \
SLIST_NEXT(elm, field) = \
SLIST_NEXT(SLIST_NEXT(elm, field), field); \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
#define SLIST_SWAP(head1, head2, type) do { \
struct type *swap_first = SLIST_FIRST(head1); \
SLIST_FIRST(head1) = SLIST_FIRST(head2); \
SLIST_FIRST(head2) = swap_first; \
} while (0)
/*
* Singly-linked Tail queue declarations.
*/
@@ -323,6 +268,12 @@ struct { \
(var); \
(var) = STAILQ_NEXT((var), field))
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = STAILQ_FIRST((head)); \
(var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define STAILQ_INIT(head) do { \
STAILQ_FIRST((head)) = NULL; \
(head)->stqh_last = &STAILQ_FIRST((head)); \
@@ -347,14 +298,13 @@ struct { \
} while (0)
#define STAILQ_LAST(head, type, field) \
(STAILQ_EMPTY((head)) ? \
NULL : \
((struct type *) \
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
(STAILQ_EMPTY((head)) ? NULL : \
__containerof((head)->stqh_last, struct type, field.stqe_next))
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
#define STAILQ_REMOVE(head, elm, type, field) do { \
QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \
if (STAILQ_FIRST((head)) == (elm)) { \
STAILQ_REMOVE_HEAD((head), field); \
} \
@@ -362,10 +312,15 @@ struct { \
struct type *curelm = STAILQ_FIRST((head)); \
while (STAILQ_NEXT(curelm, field) != (elm)) \
curelm = STAILQ_NEXT(curelm, field); \
if ((STAILQ_NEXT(curelm, field) = \
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
STAILQ_REMOVE_AFTER(head, curelm, field); \
} \
TRASHIT(*oldnext); \
} while (0)
#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
if ((STAILQ_NEXT(elm, field) = \
STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)
#define STAILQ_REMOVE_HEAD(head, field) do { \
@@ -374,11 +329,20 @@ struct { \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
#define STAILQ_SWAP(head1, head2, type) do { \
struct type *swap_first = STAILQ_FIRST(head1); \
struct type **swap_last = (head1)->stqh_last; \
STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
(head1)->stqh_last = (head2)->stqh_last; \
STAILQ_FIRST(head2) = swap_first; \
(head2)->stqh_last = swap_last; \
if (STAILQ_EMPTY(head1)) \
(head1)->stqh_last = &STAILQ_FIRST(head1); \
if (STAILQ_EMPTY(head2)) \
(head2)->stqh_last = &STAILQ_FIRST(head2); \
} while (0)
/*
* List declarations.
*/
@@ -400,6 +364,31 @@ struct { \
* List functions.
*/
#if (defined(_KERNEL) && defined(INVARIANTS))
#define QMD_LIST_CHECK_HEAD(head, field) do { \
if (LIST_FIRST((head)) != NULL && \
LIST_FIRST((head))->field.le_prev != \
&LIST_FIRST((head))) \
panic("Bad list head %p first->prev != head", (head)); \
} while (0)
#define QMD_LIST_CHECK_NEXT(elm, field) do { \
if (LIST_NEXT((elm), field) != NULL && \
LIST_NEXT((elm), field)->field.le_prev != \
&((elm)->field.le_next)) \
panic("Bad link elm %p next->prev != elm", (elm)); \
} while (0)
#define QMD_LIST_CHECK_PREV(elm, field) do { \
if (*(elm)->field.le_prev != (elm)) \
panic("Bad link elm %p prev->next != elm", (elm)); \
} while (0)
#else
#define QMD_LIST_CHECK_HEAD(head, field)
#define QMD_LIST_CHECK_NEXT(elm, field)
#define QMD_LIST_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
#define LIST_FIRST(head) ((head)->lh_first)
@@ -409,11 +398,17 @@ struct { \
(var); \
(var) = LIST_NEXT((var), field))
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = LIST_FIRST((head)); \
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
(var) = (tvar))
#define LIST_INIT(head) do { \
LIST_FIRST((head)) = NULL; \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
QMD_LIST_CHECK_NEXT(listelm, field); \
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
LIST_NEXT((listelm), field)->field.le_prev = \
&LIST_NEXT((elm), field); \
@@ -422,6 +417,7 @@ struct { \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
QMD_LIST_CHECK_PREV(listelm, field); \
(elm)->field.le_prev = (listelm)->field.le_prev; \
LIST_NEXT((elm), field) = (listelm); \
*(listelm)->field.le_prev = (elm); \
@@ -429,6 +425,7 @@ struct { \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
QMD_LIST_CHECK_HEAD((head), field); \
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
LIST_FIRST((head)) = (elm); \
@@ -437,11 +434,31 @@ struct { \
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_PREV(elm, head, type, field) \
((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL : \
__containerof((elm)->field.le_prev, struct type, field.le_next))
#define LIST_REMOVE(elm, field) do { \
QMD_SAVELINK(oldnext, (elm)->field.le_next); \
QMD_SAVELINK(oldprev, (elm)->field.le_prev); \
QMD_LIST_CHECK_NEXT(elm, field); \
QMD_LIST_CHECK_PREV(elm, field); \
if (LIST_NEXT((elm), field) != NULL) \
LIST_NEXT((elm), field)->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
TRASHIT(*oldnext); \
TRASHIT(*oldprev); \
} while (0)
#define LIST_SWAP(head1, head2, type, field) do { \
struct type *swap_tmp = LIST_FIRST((head1)); \
LIST_FIRST((head1)) = LIST_FIRST((head2)); \
LIST_FIRST((head2)) = swap_tmp; \
if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
} while (0)
/*
@@ -467,13 +484,44 @@ struct { \
/*
* Tail queue functions.
*/
#if (defined(_KERNEL) && defined(INVARIANTS))
#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
if (!TAILQ_EMPTY(head) && \
TAILQ_FIRST((head))->field.tqe_prev != \
&TAILQ_FIRST((head))) \
panic("Bad tailq head %p first->prev != head", (head)); \
} while (0)
#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
if (*(head)->tqh_last != NULL) \
panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
} while (0)
#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
if (TAILQ_NEXT((elm), field) != NULL && \
TAILQ_NEXT((elm), field)->field.tqe_prev != \
&((elm)->field.tqe_next)) \
panic("Bad link elm %p next->prev != elm", (elm)); \
} while (0)
#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
if (*(elm)->field.tqe_prev != (elm)) \
panic("Bad link elm %p prev->next != elm", (elm)); \
} while (0)
#else
#define QMD_TAILQ_CHECK_HEAD(head, field)
#define QMD_TAILQ_CHECK_TAIL(head, headname)
#define QMD_TAILQ_CHECK_NEXT(elm, field)
#define QMD_TAILQ_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */
#define TAILQ_CONCAT(head1, head2, field) do { \
if (!TAILQ_EMPTY(head2)) { \
*(head1)->tqh_last = (head2)->tqh_first; \
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
(head1)->tqh_last = (head2)->tqh_last; \
TAILQ_INIT((head2)); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_HEAD(head1); \
QMD_TRACE_HEAD(head2); \
} \
} while (0)
@@ -487,11 +535,21 @@ struct { \
(var); \
(var) = TAILQ_NEXT((var), field))
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = TAILQ_FIRST((head)); \
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = TAILQ_LAST((head), headname); \
(var); \
(var) = TAILQ_PREV((var), headname, field))
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
for ((var) = TAILQ_LAST((head), headname); \
(var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
(var) = (tvar))
#define TAILQ_INIT(head) do { \
TAILQ_FIRST((head)) = NULL; \
(head)->tqh_last = &TAILQ_FIRST((head)); \
@@ -499,6 +557,7 @@ struct { \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
QMD_TAILQ_CHECK_NEXT(listelm, field); \
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
TAILQ_NEXT((elm), field)->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
@@ -513,6 +572,7 @@ struct { \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
QMD_TAILQ_CHECK_PREV(listelm, field); \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
TAILQ_NEXT((elm), field) = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
@@ -522,6 +582,7 @@ struct { \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
QMD_TAILQ_CHECK_HEAD(head, field); \
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
TAILQ_FIRST((head))->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
@@ -534,6 +595,7 @@ struct { \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
QMD_TAILQ_CHECK_TAIL(head, field); \
TAILQ_NEXT((elm), field) = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
@@ -551,6 +613,10 @@ struct { \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_REMOVE(head, elm, field) do { \
QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \
QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \
QMD_TAILQ_CHECK_NEXT(elm, field); \
QMD_TAILQ_CHECK_PREV(elm, field); \
if ((TAILQ_NEXT((elm), field)) != NULL) \
TAILQ_NEXT((elm), field)->field.tqe_prev = \
(elm)->field.tqe_prev; \
@@ -559,12 +625,26 @@ struct { \
QMD_TRACE_HEAD(head); \
} \
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
TRASHIT((elm)->field.tqe_next); \
TRASHIT((elm)->field.tqe_prev); \
TRASHIT(*oldnext); \
TRASHIT(*oldprev); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#if defined(__cplusplus)
}
#endif
#endif /* !_DB_QUEUE_H_ */
#define TAILQ_SWAP(head1, head2, type, field) do { \
struct type *swap_first = (head1)->tqh_first; \
struct type **swap_last = (head1)->tqh_last; \
(head1)->tqh_first = (head2)->tqh_first; \
(head1)->tqh_last = (head2)->tqh_last; \
(head2)->tqh_first = swap_first; \
(head2)->tqh_last = swap_last; \
if ((swap_first = (head1)->tqh_first) != NULL) \
swap_first->field.tqe_prev = &(head1)->tqh_first; \
else \
(head1)->tqh_last = &(head1)->tqh_first; \
if ((swap_first = (head2)->tqh_first) != NULL) \
swap_first->field.tqe_prev = &(head2)->tqh_first; \
else \
(head2)->tqh_last = &(head2)->tqh_first; \
} while (0)
#endif /* !_SYS_QUEUE_H_ */


@@ -72,6 +72,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
},
{ // work
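
Each one-line addition in this file applies the same fix: a binary inspected in the pre-block refers to storage owned by the caller's env, which is only guaranteed valid for the duration of the NIF call itself, while the request may sit on the queue long after. Annotated, the pattern is (a sketch of the pattern, not verbatim diff content):

{ /* pre: still on the scheduler thread, the caller's terms are live */
  if (!(enif_inspect_binary(env, argv[1], &args->config))) {
    ASYNC_NIF_RETURN_BADARG();
  }
  /* argv[1] belongs to the caller's env; copying the term into the
     request's long-lived env (new_env) is what this commit relies on to
     keep the bytes args->config points at alive while the request waits
     on the queue. */
  enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
},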
@@ -194,6 +195,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -222,6 +224,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -252,6 +255,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[3], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[3]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -280,6 +284,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -306,6 +311,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -334,6 +340,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -364,6 +371,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -392,6 +400,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -420,6 +429,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->key))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -460,6 +470,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->key))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -515,6 +526,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[3], &args->value))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[3]);
enif_keep_resource((void*)args->session_handle);
},
{ // work
@@ -812,6 +825,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->key))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_keep_resource((void*)args->cursor_handle);
},
{ // work
@@ -845,6 +859,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->key))) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_keep_resource((void*)args->cursor_handle);
},
{ // work
@@ -906,6 +921,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->value)) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->cursor_handle);
},
{ // work
@@ -944,6 +961,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->value)) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->cursor_handle);
},
{ // work
@@ -982,6 +1001,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->value)) {
ASYNC_NIF_RETURN_BADARG();
}
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->cursor_handle);
},
{ // work


@@ -25,11 +25,11 @@
begin
NIFRef = erlang:make_ref(),
case erlang:apply(Fun, [NIFRef|Args]) of
{ok, QDepth} ->
erlang:bump_reductions(100 * QDepth),
{ok, {enqueued, QDepth}} ->
[erlang:bump_reductions(10 * QDepth) || is_integer(QDepth), QDepth > 100],
receive
{NIFRef, {error, shutdown}=Error} ->
%% Work unit was not executed, requeue it?
%% Work unit was queued, but not executed.
Error;
{NIFRef, {error, _Reason}=Error} ->
%% Work unit returned an error.