Use async nifs technique to avoid blocking the BEAM's schedulers #1

Closed
gburd wants to merge 15 commits from gsb-async-nifs into master
5 changed files with 616 additions and 535 deletions
Showing only changes of commit c0231923f9 - Show all commits

View file

@ -52,6 +52,9 @@ repl:
gdb-repl: gdb-repl:
USE_GDB=1 $(ERL) -pz deps/*/ebin -pa ebin USE_GDB=1 $(ERL) -pz deps/*/ebin -pa ebin
eunit-repl:
erl -pa .eunit -pz deps/*/ebin -pz ebin -exec 'cd(".eunit").'
# NOTES # NOTES
# #

View file

@ -34,151 +34,116 @@ extern "C" {
system you must do the following before launching the process. system you must do the following before launching the process.
echo 1000000 > /proc/sys/kernel/threads-max echo 1000000 > /proc/sys/kernel/threads-max
and for all UNIX systems there will be ulimit maximums. */ and for all UNIX systems there will be ulimit maximums. */
#ifndef ANIF_MAX_WORKERS #ifndef ASYNC_NIF_MAX_WORKERS
#define ANIF_MAX_WORKERS 64 #define ASYNC_NIF_MAX_WORKERS 64
#endif #endif
#ifndef __offsetof
#define __offsetof(st, m) \
((size_t) ( (char *)&((st *)0)->m - (char *)0 ))
#endif
#include "queue.h" #include "queue.h"
struct anif_req_entry { struct async_nif_req_entry {
ERL_NIF_TERM pid;
ErlNifEnv *env;
ERL_NIF_TERM ref, *argv; ERL_NIF_TERM ref, *argv;
ErlNifEnv *env;
ErlNifPid pid;
void *args; void *args;
void *priv_data; void *priv_data;
void (*fn_work)(ErlNifEnv*, ERL_NIF_TERM, void *, ErlNifPid*, void *); void (*fn_work)(ErlNifEnv*, ERL_NIF_TERM, void *, ErlNifPid*, void *);
void (*fn_post)(void *); void (*fn_post)(void *);
STAILQ_ENTRY(anif_req_entry) entries; STAILQ_ENTRY(async_nif_req_entry) entries;
}; };
STAILQ_HEAD(reqs, anif_req_entry) anif_reqs = STAILQ_HEAD_INITIALIZER(anif_reqs); STAILQ_HEAD(reqs, async_nif_req_entry) async_nif_reqs = STAILQ_HEAD_INITIALIZER(async_nif_reqs);
struct anif_worker_entry { struct async_nif_worker_entry {
ErlNifTid tid; ErlNifTid tid;
LIST_ENTRY(anif_worker_entry) entries; LIST_ENTRY(async_nif_worker_entry) entries;
}; };
LIST_HEAD(idle_workers, anif_worker_entry) anif_idle_workers = LIST_HEAD_INITIALIZER(anif_worker); LIST_HEAD(idle_workers, async_nif_worker_entry) async_nif_idle_workers = LIST_HEAD_INITIALIZER(async_nif_worker);
static volatile unsigned int async_nif_req_count = 0;
static volatile unsigned int async_nif_shutdown = 0;
static ErlNifMutex *async_nif_req_mutex = NULL;
static ErlNifMutex *async_nif_worker_mutex = NULL;
static ErlNifCond *async_nif_cnd = NULL;
static struct async_nif_worker_entry async_nif_worker_entries[ASYNC_NIF_MAX_WORKERS];
static volatile unsigned int anif_req_count = 0;
static volatile unsigned int anif_shutdown = 0;
static ErlNifMutex *anif_req_mutex = NULL;
static ErlNifMutex *anif_worker_mutex = NULL;
static ErlNifCond *anif_cnd = NULL;
static struct anif_worker_entry anif_worker_entries[ANIF_MAX_WORKERS];
#define ASYNC_NIF_DECL(decl, frame, pre_block, work_block, post_block) \ #define ASYNC_NIF_DECL(decl, frame, pre_block, work_block, post_block) \
struct decl ## _args frame; \ struct decl ## _args frame; \
static void fn_work_ ## decl (ErlNifEnv *env, ERL_NIF_TERM ref, void *priv_data, ErlNifPid *pid, struct decl ## _args *args) \ static void fn_work_ ## decl (ErlNifEnv *env, ERL_NIF_TERM ref, void *priv_data, ErlNifPid *pid, struct decl ## _args *args) work_block \
work_block \
static void fn_post_ ## decl (struct decl ## _args *args) { \ static void fn_post_ ## decl (struct decl ## _args *args) { \
do post_block while(0); \ do post_block while(0); \
} \ } \
static ERL_NIF_TERM decl(ErlNifEnv* env_in, int argc_in, const ERL_NIF_TERM argv_in[]) { \ static ERL_NIF_TERM decl(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv_in[]) { \
struct decl ## _args on_stack_args; \ struct decl ## _args on_stack_args; \
struct decl ## _args *args = &on_stack_args; \ struct decl ## _args *args = &on_stack_args; \
struct decl ## _args *copy_of_args; \ struct decl ## _args *copy_of_args; \
struct anif_req_entry *req = NULL; \ struct async_nif_req_entry *req = NULL; \
ErlNifPid pid_in; \ ErlNifEnv *new_env = NULL; \
ErlNifEnv *env = NULL; \ /* argv[0] is a ref used for selective recv */ \
ERL_NIF_TERM ref; \ const ERL_NIF_TERM *argv = argv_in + 1; \
ERL_NIF_TERM *argv = NULL; \ argc--; \
int __i = 0, argc = argc_in - 1; \ if (async_nif_shutdown) \
enif_self(env_in, &pid_in); \ return enif_make_tuple2(env, enif_make_atom(env, "error"), \
if (anif_shutdown) { \ enif_make_atom(env, "shutdown")); \
enif_send(NULL, &pid_in, env_in, \ if (!(new_env = enif_alloc_env())) { \
enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \ return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env_in, "shutdown"))); \ enif_make_atom(env, "enomem")); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "shutdown")); \
} \ } \
env = enif_alloc_env(); \ do pre_block while(0); \
if (!env) \ req = (struct async_nif_req_entry*)enif_alloc(sizeof(struct async_nif_req_entry)); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \ if (!req) { \
enif_make_atom(env_in, "enomem")); \ fn_post_ ## decl (args); \
ref = enif_make_copy(env, argv_in[0]); \ enif_free_env(new_env); \
if (!ref) { \ return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_free_env(env); \ enif_make_atom(env, "enomem")); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \ } \
copy_of_args = (struct decl ## _args *)enif_alloc(sizeof(struct decl ## _args)); \ copy_of_args = (struct decl ## _args *)enif_alloc(sizeof(struct decl ## _args)); \
if (!copy_of_args) { \ if (!copy_of_args) { \
enif_free_env(env); \ fn_post_ ## decl (args); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \ enif_free_env(new_env); \
enif_make_atom(env_in, "enomem")); \ return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "enomem")); \
} \ } \
argv = (ERL_NIF_TERM *)enif_alloc((sizeof(ERL_NIF_TERM) * argc)); \
if (!argv) { \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
req = (struct anif_req_entry*)enif_alloc(sizeof(struct anif_req_entry)); \
if (!req) { \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
for (__i = 0; __i < argc; __i++) { \
argv[__i] = enif_make_copy(env, argv_in[(__i + 1)]); \
if (!argv[__i]) { \
enif_free(req); \
enif_free(argv); \
enif_free(copy_of_args); \
enif_free_env(env); \
return enif_make_tuple2(env_in, enif_make_atom(env_in, "error"), \
enif_make_atom(env_in, "enomem")); \
} \
} \
do pre_block while(0); \
memcpy(copy_of_args, args, sizeof(struct decl ## _args)); \ memcpy(copy_of_args, args, sizeof(struct decl ## _args)); \
req->ref = ref; \ req->env = new_env; \
req->pid = enif_make_pid(env, &pid_in); \ req->ref = enif_make_copy(new_env, argv_in[0]); \
enif_self(env, &req->pid); \
req->args = (void*)copy_of_args; \ req->args = (void*)copy_of_args; \
req->argv = argv; \ req->priv_data = enif_priv_data(env); \
req->env = env; \
req->priv_data = enif_priv_data(env_in); \
req->fn_work = (void (*)(ErlNifEnv *, ERL_NIF_TERM, void*, ErlNifPid*, void *))fn_work_ ## decl ; \ req->fn_work = (void (*)(ErlNifEnv *, ERL_NIF_TERM, void*, ErlNifPid*, void *))fn_work_ ## decl ; \
req->fn_post = (void (*)(void *))fn_post_ ## decl; \ req->fn_post = (void (*)(void *))fn_post_ ## decl; \
anif_enqueue_req(req); \ async_nif_enqueue_req(req); \
return enif_make_tuple2(env, enif_make_atom(env, "ok"), \ return enif_make_tuple2(env, enif_make_atom(env, "ok"), \
enif_make_int(env, anif_req_count)); \ enif_make_tuple2(env, enif_make_atom(env, "enqueued"), \
enif_make_int(env, async_nif_req_count))); \
} }
#define ASYNC_NIF_LOAD() if (anif_init() != 0) return -1; #define ASYNC_NIF_LOAD() if (async_nif_init() != 0) return -1;
#define ASYNC_NIF_UNLOAD() anif_unload(); #define ASYNC_NIF_UNLOAD() async_nif_unload();
#define ASYNC_NIF_UPGRADE() anif_unload(); #define ASYNC_NIF_UPGRADE() async_nif_unload();
#define ASYNC_NIF_PRE_ENV() env_in #define ASYNC_NIF_RETURN_BADARG() return enif_make_badarg(env);
#define ASYNC_NIF_PRE_RETURN_CLEANUP() \ #define ASYNC_NIF_WORK_ENV new_env
enif_free(argv); \
enif_free(copy_of_args); \
enif_free(req); \
enif_free_env(env);
#define ASYNC_NIF_RETURN_BADARG() ASYNC_NIF_PRE_RETURN_CLEANUP(); return enif_make_badarg(env_in);
#ifndef PULSE_FORCE_USING_PULSE_SEND_HERE
#define ASYNC_NIF_REPLY(msg) enif_send(NULL, pid, env, enif_make_tuple2(env, ref, msg)) #define ASYNC_NIF_REPLY(msg) enif_send(NULL, pid, env, enif_make_tuple2(env, ref, msg))
#else
#define ASYNC_NIF_REPLY(msg) PULSE_SEND(NULL, pid, env, enif_make_tuple2(env, ref, msg))
#endif
static void anif_enqueue_req(struct anif_req_entry *r) static void async_nif_enqueue_req(struct async_nif_req_entry *r)
{ {
/* Add the request to the work queue. */ /* Add the request to the work queue. */
enif_mutex_lock(anif_req_mutex); enif_mutex_lock(async_nif_req_mutex);
STAILQ_INSERT_TAIL(&anif_reqs, r, entries); STAILQ_INSERT_TAIL(&async_nif_reqs, r, entries);
anif_req_count++; async_nif_req_count++;
enif_mutex_unlock(anif_req_mutex); enif_mutex_unlock(async_nif_req_mutex);
enif_cond_broadcast(anif_cnd); enif_cond_broadcast(async_nif_cnd);
} }
static void *anif_worker_fn(void *arg) static void *async_nif_worker_fn(void *arg)
{ {
struct anif_worker_entry *worker = (struct anif_worker_entry *)arg; struct async_nif_worker_entry *worker = (struct async_nif_worker_entry *)arg;
struct anif_req_entry *req = NULL; struct async_nif_req_entry *req = NULL;
/* /*
* Workers are active while there is work on the queue to do and * Workers are active while there is work on the queue to do and
@ -186,39 +151,36 @@ static void *anif_worker_fn(void *arg)
*/ */
for(;;) { for(;;) {
/* Examine the request queue, are there things to be done? */ /* Examine the request queue, are there things to be done? */
enif_mutex_lock(anif_req_mutex); enif_mutex_lock(async_nif_req_mutex);
enif_mutex_lock(anif_worker_mutex); enif_mutex_lock(async_nif_worker_mutex);
LIST_INSERT_HEAD(&anif_idle_workers, worker, entries); LIST_INSERT_HEAD(&async_nif_idle_workers, worker, entries);
enif_mutex_unlock(anif_worker_mutex); enif_mutex_unlock(async_nif_worker_mutex);
check_again_for_work: check_again_for_work:
if (anif_shutdown) { enif_mutex_unlock(anif_req_mutex); break; } if (async_nif_shutdown) { enif_mutex_unlock(async_nif_req_mutex); break; }
if ((req = STAILQ_FIRST(&anif_reqs)) == NULL) { if ((req = STAILQ_FIRST(&async_nif_reqs)) == NULL) {
/* Queue is empty, join the list of idle workers and wait for work */ /* Queue is empty, join the list of idle workers and wait for work */
enif_cond_wait(anif_cnd, anif_req_mutex); enif_cond_wait(async_nif_cnd, async_nif_req_mutex);
goto check_again_for_work; goto check_again_for_work;
} else { } else {
/* `req` is our work request and we hold the lock. */ /* `req` is our work request and we hold the lock. */
enif_cond_broadcast(anif_cnd); enif_cond_broadcast(async_nif_cnd);
/* Take the request off the queue. */ /* Take the request off the queue. */
STAILQ_REMOVE(&anif_reqs, req, anif_req_entry, entries); anif_req_count--; STAILQ_REMOVE(&async_nif_reqs, req, async_nif_req_entry, entries); async_nif_req_count--;
/* Now we need to remove this thread from the list of idle threads. */ /* Now we need to remove this thread from the list of idle threads. */
enif_mutex_lock(anif_worker_mutex); enif_mutex_lock(async_nif_worker_mutex);
LIST_REMOVE(worker, entries); LIST_REMOVE(worker, entries);
/* Release the locks in reverse order that we acquired them, /* Release the locks in reverse order that we acquired them,
so as not to self-deadlock. */ so as not to self-deadlock. */
enif_mutex_unlock(anif_worker_mutex); enif_mutex_unlock(async_nif_worker_mutex);
enif_mutex_unlock(anif_req_mutex); enif_mutex_unlock(async_nif_req_mutex);
/* Finally, let's do the work! :) */ /* Finally, let's do the work! :) */
ErlNifPid pid; req->fn_work(req->env, req->ref, req->priv_data, &req->pid, req->args);
enif_get_local_pid(req->env, req->pid, &pid);
req->fn_work(req->env, req->ref, req->priv_data, &pid, req->args);
req->fn_post(req->args); req->fn_post(req->args);
enif_free(req->args); enif_free(req->args);
enif_free(req->argv);
enif_free_env(req->env); enif_free_env(req->env);
enif_free(req); enif_free(req);
} }
@ -227,74 +189,89 @@ static void *anif_worker_fn(void *arg)
return 0; return 0;
} }
static void anif_unload(void) static void async_nif_unload(void)
{ {
unsigned int i; unsigned int i;
/* Signal the worker threads, stop what you're doing and exit. */ /* Signal the worker threads, stop what you're doing and exit. */
enif_mutex_lock(anif_req_mutex); enif_mutex_lock(async_nif_req_mutex);
anif_shutdown = 1; async_nif_shutdown = 1;
enif_cond_broadcast(anif_cnd); enif_cond_broadcast(async_nif_cnd);
enif_mutex_unlock(anif_req_mutex); enif_mutex_unlock(async_nif_req_mutex);
/* Join for the now exiting worker threads. */ /* Join for the now exiting worker threads. */
for (i = 0; i < ANIF_MAX_WORKERS; ++i) { for (i = 0; i < ASYNC_NIF_MAX_WORKERS; ++i) {
void *exit_value = 0; /* Ignore this. */ void *exit_value = 0; /* Ignore this. */
enif_thread_join(anif_worker_entries[i].tid, &exit_value); enif_thread_join(async_nif_worker_entries[i].tid, &exit_value);
} }
/* We won't get here until all threads have exited. /* We won't get here until all threads have exited.
Patch things up, and carry on. */ Patch things up, and carry on. */
enif_mutex_lock(anif_req_mutex); enif_mutex_lock(async_nif_req_mutex);
/* Worker threads are stopped, now toss anything left in the queue. */ /* Worker threads are stopped, now toss anything left in the queue. */
struct anif_req_entry *req = NULL; struct async_nif_req_entry *req = NULL;
STAILQ_FOREACH(req, &anif_reqs, entries) { STAILQ_FOREACH(req, &async_nif_reqs, entries) {
STAILQ_REMOVE(&anif_reqs, STAILQ_LAST(&anif_reqs, anif_req_entry, entries), STAILQ_REMOVE(&async_nif_reqs, STAILQ_LAST(&async_nif_reqs, async_nif_req_entry, entries),
anif_req_entry, entries); async_nif_req_entry, entries);
ErlNifPid pid; #ifdef PULSE
enif_get_local_pid(req->env, req->pid, &pid); PULSE_SEND(NULL, &req->pid, req->env,
enif_send(NULL, &pid, req->env,
enif_make_tuple2(req->env, enif_make_atom(req->env, "error"), enif_make_tuple2(req->env, enif_make_atom(req->env, "error"),
enif_make_atom(req->env, "shutdown"))); enif_make_atom(req->env, "shutdown")));
#else
enif_send(NULL, &req->pid, req->env,
enif_make_tuple2(req->env, enif_make_atom(req->env, "error"),
enif_make_atom(req->env, "shutdown")));
#endif
req->fn_post(req->args); req->fn_post(req->args);
enif_free(req->args); enif_free(req->args);
enif_free(req->argv);
enif_free_env(req->env);
enif_free(req); enif_free(req);
anif_req_count--; async_nif_req_count--;
} }
enif_mutex_unlock(anif_req_mutex); enif_mutex_unlock(async_nif_req_mutex);
enif_cond_destroy(anif_cnd); memset(async_nif_worker_entries, sizeof(struct async_nif_worker_entry) * ASYNC_NIF_MAX_WORKERS, 0);
/* Not strictly necessary. */ enif_cond_destroy(async_nif_cnd); async_nif_cnd = NULL;
memset(anif_worker_entries, sizeof(struct anif_worker_entry) * ANIF_MAX_WORKERS, 0); enif_mutex_destroy(async_nif_req_mutex); async_nif_req_mutex = NULL;
enif_mutex_destroy(async_nif_worker_mutex); async_nif_worker_mutex = NULL;
enif_mutex_destroy(anif_req_mutex); anif_req_mutex = NULL;
enif_mutex_destroy(anif_worker_mutex); anif_worker_mutex = NULL;
} }
static int anif_init(void) static int async_nif_init(void)
{ {
unsigned int i; int i;
/* Don't init more than once. */ /* Don't init more than once. */
if (anif_req_mutex) return 0; if (async_nif_req_mutex) return 0;
anif_req_mutex = enif_mutex_create(NULL); async_nif_req_mutex = enif_mutex_create(NULL);
anif_worker_mutex = enif_mutex_create(NULL); async_nif_worker_mutex = enif_mutex_create(NULL);
anif_cnd = enif_cond_create(NULL); async_nif_cnd = enif_cond_create(NULL);
/* Setup the requests management. */ /* Setup the requests management. */
anif_req_count = 0; async_nif_req_count = 0;
/* Setup the thread pool management. */ /* Setup the thread pool management. */
enif_mutex_lock(anif_worker_mutex); enif_mutex_lock(async_nif_worker_mutex);
for (i = 0; i < ANIF_MAX_WORKERS; i++) { memset(async_nif_worker_entries, sizeof(struct async_nif_worker_entry) * ASYNC_NIF_MAX_WORKERS, 0);
enif_thread_create(NULL, &anif_worker_entries[i].tid,
&anif_worker_fn, (void*)&anif_worker_entries[i], NULL); for (i = 0; i < ASYNC_NIF_MAX_WORKERS; i++) {
if (enif_thread_create(NULL, &async_nif_worker_entries[i].tid,
&async_nif_worker_fn, (void*)&async_nif_worker_entries[i], NULL) != 0) {
async_nif_shutdown = 1;
enif_cond_broadcast(async_nif_cnd);
enif_mutex_unlock(async_nif_worker_mutex);
while(i-- > 0) {
void *exit_value = 0; /* Ignore this. */
enif_thread_join(async_nif_worker_entries[i].tid, &exit_value);
}
memset(async_nif_worker_entries, sizeof(struct async_nif_worker_entry) * ASYNC_NIF_MAX_WORKERS, 0);
enif_cond_destroy(async_nif_cnd); async_nif_cnd = NULL;
enif_mutex_destroy(async_nif_req_mutex); async_nif_req_mutex = NULL;
enif_mutex_destroy(async_nif_worker_mutex); async_nif_worker_mutex = NULL;
return -1;
}
} }
enif_mutex_unlock(anif_worker_mutex); enif_mutex_unlock(async_nif_worker_mutex);
return 0; return 0;
} }

View file

@ -1,11 +1,6 @@
/*- /*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996, 2012 Oracle and/or its affiliates. All rights reserved.
*/
/*
* Copyright (c) 1991, 1993 * Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved. * The Regents of the University of California. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -15,10 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright * 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the * notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution. * documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors * 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software * may be used to endorse or promote products derived from this software
* without specific prior written permission. * without specific prior written permission.
@ -35,17 +26,26 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* @(#)queue.h 8.5 (Berkeley) 8/20/94 * @(#)queue.h 8.5 (Berkeley) 8/20/94
* $FreeBSD: src/sys/sys/queue.h,v 1.54 2002/08/05 05:18:43 alfred Exp $ * $FreeBSD: src/sys/sys/queue.h,v 1.75.2.3 2012/11/17 11:37:26 svnexp Exp $
*/ */
#ifndef _DB_QUEUE_H_ #ifndef _SYS_QUEUE_H_
#define _DB_QUEUE_H_ #define _SYS_QUEUE_H_
#if defined(__cplusplus) #ifndef __offsetof
extern "C" { #define __offsetof(st, m) \
((size_t) ( (char *)&((st *)0)->m - (char *)0 ))
#endif #endif
#ifndef __containerof
#define __containerof(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - __offsetof(type,member) );})
#endif
#include <sys/cdefs.h>
/* /*
* This file defines four types of data structures: singly-linked lists, * This file defines four types of data structures: singly-linked lists,
* singly-linked tail queues, lists and tail queues. * singly-linked tail queues, lists and tail queues.
@ -76,7 +76,7 @@ extern "C" {
* so that an arbitrary element can be removed without a need to * so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before * traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list * or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction. * may be traversed in either direction.
* *
* A tail queue is headed by a pair of pointers, one to the head of the * A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly * list and the other to the tail of the list. The elements are doubly
@ -88,483 +88,563 @@ extern "C" {
* For details on the use of these macros, see the queue(3) manual page. * For details on the use of these macros, see the queue(3) manual page.
* *
* *
* SLIST LIST STAILQ TAILQ * SLIST LIST STAILQ TAILQ
* _HEAD + + + + * _HEAD + + + +
* _HEAD_INITIALIZER + + + + * _HEAD_INITIALIZER + + + +
* _ENTRY + + + + * _ENTRY + + + +
* _INIT + + + + * _INIT + + + +
* _EMPTY + + + + * _EMPTY + + + +
* _FIRST + + + + * _FIRST + + + +
* _NEXT + + + + * _NEXT + + + +
* _PREV - - - + * _PREV - + - +
* _LAST - - + + * _LAST - - + +
* _FOREACH + + + + * _FOREACH + + + +
* _FOREACH_REVERSE - - - + * _FOREACH_SAFE + + + +
* _INSERT_HEAD + + + + * _FOREACH_REVERSE - - - +
* _INSERT_BEFORE - + - + * _FOREACH_REVERSE_SAFE - - - +
* _INSERT_AFTER + + + + * _INSERT_HEAD + + + +
* _INSERT_TAIL - - + + * _INSERT_BEFORE - + - +
* _CONCAT - - + + * _INSERT_AFTER + + + +
* _REMOVE_HEAD + - + - * _INSERT_TAIL - - + +
* _REMOVE + + + + * _CONCAT - - + +
* _REMOVE_AFTER + - + -
* _REMOVE_HEAD + - + -
* _REMOVE + + + +
* _SWAP + + + +
* *
*/ */
#ifdef QUEUE_MACRO_DEBUG
/*
* XXX
* We #undef all of the macros because there are incompatible versions of this
* file and these macros on various systems. What makes the problem worse is
* they are included and/or defined by system include files which we may have
* already loaded into Berkeley DB before getting here. For example, FreeBSD's
* <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
* several of the LIST_XXX macros. Visual C.NET 7.0 also defines some of these
* same macros in Vc7\PlatformSDK\Include\WinNT.h. Make sure we use ours.
*/
#undef LIST_EMPTY
#undef LIST_ENTRY
#undef LIST_FIRST
#undef LIST_FOREACH
#undef LIST_HEAD
#undef LIST_HEAD_INITIALIZER
#undef LIST_INIT
#undef LIST_INSERT_AFTER
#undef LIST_INSERT_BEFORE
#undef LIST_INSERT_HEAD
#undef LIST_NEXT
#undef LIST_REMOVE
#undef QMD_TRACE_ELEM
#undef QMD_TRACE_HEAD
#undef QUEUE_MACRO_DEBUG
#undef SLIST_EMPTY
#undef SLIST_ENTRY
#undef SLIST_FIRST
#undef SLIST_FOREACH
#undef SLIST_FOREACH_PREVPTR
#undef SLIST_HEAD
#undef SLIST_HEAD_INITIALIZER
#undef SLIST_INIT
#undef SLIST_INSERT_AFTER
#undef SLIST_INSERT_HEAD
#undef SLIST_NEXT
#undef SLIST_REMOVE
#undef SLIST_REMOVE_HEAD
#undef STAILQ_CONCAT
#undef STAILQ_EMPTY
#undef STAILQ_ENTRY
#undef STAILQ_FIRST
#undef STAILQ_FOREACH
#undef STAILQ_HEAD
#undef STAILQ_HEAD_INITIALIZER
#undef STAILQ_INIT
#undef STAILQ_INSERT_AFTER
#undef STAILQ_INSERT_HEAD
#undef STAILQ_INSERT_TAIL
#undef STAILQ_LAST
#undef STAILQ_NEXT
#undef STAILQ_REMOVE
#undef STAILQ_REMOVE_HEAD
#undef STAILQ_REMOVE_HEAD_UNTIL
#undef TAILQ_CONCAT
#undef TAILQ_EMPTY
#undef TAILQ_ENTRY
#undef TAILQ_FIRST
#undef TAILQ_FOREACH
#undef TAILQ_FOREACH_REVERSE
#undef TAILQ_HEAD
#undef TAILQ_HEAD_INITIALIZER
#undef TAILQ_INIT
#undef TAILQ_INSERT_AFTER
#undef TAILQ_INSERT_BEFORE
#undef TAILQ_INSERT_HEAD
#undef TAILQ_INSERT_TAIL
#undef TAILQ_LAST
#undef TAILQ_NEXT
#undef TAILQ_PREV
#undef TAILQ_REMOVE
#undef TRACEBUF
#undef TRASHIT
#define QUEUE_MACRO_DEBUG 0
#if QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */ /* Store the last 2 places the queue element or head was altered */
struct qm_trace { struct qm_trace {
char * lastfile; char * lastfile;
int lastline; int lastline;
char * prevfile; char * prevfile;
int prevline; int prevline;
}; };
#define TRACEBUF struct qm_trace trace; #define TRACEBUF struct qm_trace trace;
#define TRASHIT(x) do {(x) = (void *)-1;} while (0) #define TRASHIT(x) do {(x) = (void *)-1;} while (0)
#define QMD_SAVELINK(name, link) void **name = (void *)&(link)
#define QMD_TRACE_HEAD(head) do { \ #define QMD_TRACE_HEAD(head) do { \
(head)->trace.prevline = (head)->trace.lastline; \ (head)->trace.prevline = (head)->trace.lastline; \
(head)->trace.prevfile = (head)->trace.lastfile; \ (head)->trace.prevfile = (head)->trace.lastfile; \
(head)->trace.lastline = __LINE__; \ (head)->trace.lastline = __LINE__; \
(head)->trace.lastfile = __FILE__; \ (head)->trace.lastfile = __FILE__; \
} while (0) } while (0)
#define QMD_TRACE_ELEM(elem) do { \ #define QMD_TRACE_ELEM(elem) do { \
(elem)->trace.prevline = (elem)->trace.lastline; \ (elem)->trace.prevline = (elem)->trace.lastline; \
(elem)->trace.prevfile = (elem)->trace.lastfile; \ (elem)->trace.prevfile = (elem)->trace.lastfile; \
(elem)->trace.lastline = __LINE__; \ (elem)->trace.lastline = __LINE__; \
(elem)->trace.lastfile = __FILE__; \ (elem)->trace.lastfile = __FILE__; \
} while (0) } while (0)
#else #else
#define QMD_TRACE_ELEM(elem) #define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head) #define QMD_TRACE_HEAD(head)
#define TRACEBUF #define QMD_SAVELINK(name, link)
#define TRASHIT(x) #define TRACEBUF
#endif /* QUEUE_MACRO_DEBUG */ #define TRASHIT(x)
#endif /* QUEUE_MACRO_DEBUG */
/* /*
* Singly-linked List declarations. * Singly-linked List declarations.
*/ */
#define SLIST_HEAD(name, type) \ #define SLIST_HEAD(name, type) \
struct name { \ struct name { \
struct type *slh_first; /* first element */ \ struct type *slh_first; /* first element */ \
} }
#define SLIST_HEAD_INITIALIZER(head) \ #define SLIST_HEAD_INITIALIZER(head) \
{ NULL } { NULL }
#define SLIST_ENTRY(type) \ #define SLIST_ENTRY(type) \
struct { \ struct { \
struct type *sle_next; /* next element */ \ struct type *sle_next; /* next element */ \
} }
/* /*
* Singly-linked List functions. * Singly-linked List functions.
*/ */
#define SLIST_EMPTY(head) ((head)->slh_first == NULL) #define SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define SLIST_FIRST(head) ((head)->slh_first) #define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_FOREACH(var, head, field) \ #define SLIST_FOREACH(var, head, field) \
for ((var) = SLIST_FIRST((head)); \ for ((var) = SLIST_FIRST((head)); \
(var); \ (var); \
(var) = SLIST_NEXT((var), field)) (var) = SLIST_NEXT((var), field))
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ #define SLIST_FOREACH_SAFE(var, head, field, tvar) \
for ((varp) = &SLIST_FIRST((head)); \ for ((var) = SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
(varp) = &SLIST_NEXT((var), field)) (var) = (tvar))
#define SLIST_INIT(head) do { \ #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
SLIST_FIRST((head)) = NULL; \ for ((varp) = &SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \
(varp) = &SLIST_NEXT((var), field))
#define SLIST_INIT(head) do { \
SLIST_FIRST((head)) = NULL; \
} while (0) } while (0)
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
SLIST_NEXT((slistelm), field) = (elm); \ SLIST_NEXT((slistelm), field) = (elm); \
} while (0) } while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \ #define SLIST_INSERT_HEAD(head, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
SLIST_FIRST((head)) = (elm); \ SLIST_FIRST((head)) = (elm); \
} while (0) } while (0)
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) #define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_REMOVE(head, elm, type, field) do { \ #define SLIST_REMOVE(head, elm, type, field) do { \
if (SLIST_FIRST((head)) == (elm)) { \ QMD_SAVELINK(oldnext, (elm)->field.sle_next); \
SLIST_REMOVE_HEAD((head), field); \ if (SLIST_FIRST((head)) == (elm)) { \
} \ SLIST_REMOVE_HEAD((head), field); \
else { \ } \
struct type *curelm = SLIST_FIRST((head)); \ else { \
while (curelm != NULL && \ struct type *curelm = SLIST_FIRST((head)); \
SLIST_NEXT(curelm, field) != (elm)) \ while (SLIST_NEXT(curelm, field) != (elm)) \
curelm = SLIST_NEXT(curelm, field); \ curelm = SLIST_NEXT(curelm, field); \
if (curelm != NULL) \ SLIST_REMOVE_AFTER(curelm, field); \
SLIST_NEXT(curelm, field) = \ } \
SLIST_NEXT(SLIST_NEXT(curelm, field), field);\ TRASHIT(*oldnext); \
} \
} while (0) } while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \ #define SLIST_REMOVE_AFTER(elm, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ SLIST_NEXT(elm, field) = \
SLIST_NEXT(SLIST_NEXT(elm, field), field); \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
#define SLIST_SWAP(head1, head2, type) do { \
struct type *swap_first = SLIST_FIRST(head1); \
SLIST_FIRST(head1) = SLIST_FIRST(head2); \
SLIST_FIRST(head2) = swap_first; \
} while (0) } while (0)
/* /*
* Singly-linked Tail queue declarations. * Singly-linked Tail queue declarations.
*/ */
#define STAILQ_HEAD(name, type) \ #define STAILQ_HEAD(name, type) \
struct name { \ struct name { \
struct type *stqh_first;/* first element */ \ struct type *stqh_first;/* first element */ \
struct type **stqh_last;/* addr of last next element */ \ struct type **stqh_last;/* addr of last next element */ \
} }
#define STAILQ_HEAD_INITIALIZER(head) \ #define STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first } { NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \ #define STAILQ_ENTRY(type) \
struct { \ struct { \
struct type *stqe_next; /* next element */ \ struct type *stqe_next; /* next element */ \
} }
/*
 * Singly-linked Tail queue functions.
 *
 * O(1) insert at head/tail and after a known element; removal of an
 * arbitrary element is O(n) (must walk from the head to find the
 * predecessor).  The head keeps `stqh_last`, a pointer to the last
 * element's next-pointer, so tail insertion needs no traversal.
 */
#define	STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (0)

#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)

#define	STAILQ_FIRST(head)	((head)->stqh_first)

#define	STAILQ_FOREACH(var, head, field)				\
	for((var) = STAILQ_FIRST((head));				\
	   (var);							\
	   (var) = STAILQ_NEXT((var), field))

/* Safe variant: `var` may be removed from the queue inside the loop body. */
#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	STAILQ_INIT(head) do {						\
	STAILQ_FIRST((head)) = NULL;					\
	(head)->stqh_last = &STAILQ_FIRST((head));			\
} while (0)

#define	STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {		\
	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_NEXT((tqelm), field) = (elm);				\
} while (0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_FIRST((head)) = (elm);					\
} while (0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	STAILQ_NEXT((elm), field) = NULL;				\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &STAILQ_NEXT((elm), field);			\
} while (0)

/* Recover the last element from stqh_last via __containerof (defined above). */
#define	STAILQ_LAST(head, type, field)					\
	(STAILQ_EMPTY((head)) ? NULL :					\
	    __containerof((head)->stqh_last, struct type, field.stqe_next))

#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	QMD_SAVELINK(oldnext, (elm)->field.stqe_next);			\
	if (STAILQ_FIRST((head)) == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = STAILQ_FIRST((head));		\
		while (STAILQ_NEXT(curelm, field) != (elm))		\
			curelm = STAILQ_NEXT(curelm, field);		\
		STAILQ_REMOVE_AFTER(head, curelm, field);		\
	}								\
	TRASHIT(*oldnext);						\
} while (0)

#define STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if ((STAILQ_FIRST((head)) =					\
	     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)		\
		(head)->stqh_last = &STAILQ_FIRST((head));		\
} while (0)

#define STAILQ_SWAP(head1, head2, type) do {				\
	struct type *swap_first = STAILQ_FIRST(head1);			\
	struct type **swap_last = (head1)->stqh_last;			\
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2);			\
	(head1)->stqh_last = (head2)->stqh_last;			\
	STAILQ_FIRST(head2) = swap_first;				\
	(head2)->stqh_last = swap_last;					\
	if (STAILQ_EMPTY(head1))					\
		(head1)->stqh_last = &STAILQ_FIRST(head1);		\
	if (STAILQ_EMPTY(head2))					\
		(head2)->stqh_last = &STAILQ_FIRST(head2);		\
} while (0)
/*
 * List declarations.
 *
 * Doubly-linked list: each entry keeps a pointer to the next element and
 * the address of the previous element's next-pointer (`le_prev`), which
 * makes O(1) removal possible without a back pointer to the element itself.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}
/*
 * List functions.
 */

/*
 * In-kernel INVARIANTS builds verify link integrity before mutating the
 * list; userland builds compile the checks away to nothing.
 */
#if (defined(_KERNEL) && defined(INVARIANTS))
#define	QMD_LIST_CHECK_HEAD(head, field) do {				\
	if (LIST_FIRST((head)) != NULL &&				\
	    LIST_FIRST((head))->field.le_prev !=			\
	     &LIST_FIRST((head)))					\
		panic("Bad list head %p first->prev != head", (head));	\
} while (0)

#define	QMD_LIST_CHECK_NEXT(elm, field) do {				\
	if (LIST_NEXT((elm), field) != NULL &&				\
	    LIST_NEXT((elm), field)->field.le_prev !=			\
	     &((elm)->field.le_next))					\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define	QMD_LIST_CHECK_PREV(elm, field) do {				\
	if (*(elm)->field.le_prev != (elm))				\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define	QMD_LIST_CHECK_HEAD(head, field)
#define	QMD_LIST_CHECK_NEXT(elm, field)
#define	QMD_LIST_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */

#define	LIST_EMPTY(head)	((head)->lh_first == NULL)

#define	LIST_FIRST(head)	((head)->lh_first)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = LIST_FIRST((head));				\
	    (var);							\
	    (var) = LIST_NEXT((var), field))

/* Safe variant: `var` may be removed from the list inside the loop body. */
#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	LIST_INIT(head) do {						\
	LIST_FIRST((head)) = NULL;					\
} while (0)

#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QMD_LIST_CHECK_NEXT(listelm, field);				\
	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
		LIST_NEXT((listelm), field)->field.le_prev =		\
		    &LIST_NEXT((elm), field);				\
	LIST_NEXT((listelm), field) = (elm);				\
	(elm)->field.le_prev = &LIST_NEXT((listelm), field);		\
} while (0)

#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QMD_LIST_CHECK_PREV(listelm, field);				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	LIST_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &LIST_NEXT((elm), field);		\
} while (0)

#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QMD_LIST_CHECK_HEAD((head), field);				\
	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)	\
		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
	LIST_FIRST((head)) = (elm);					\
	(elm)->field.le_prev = &LIST_FIRST((head));			\
} while (0)

#define	LIST_NEXT(elm, field)	((elm)->field.le_next)

/* Recover the previous element from le_prev via __containerof (defined above). */
#define	LIST_PREV(elm, head, type, field)				\
	((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL :		\
	    __containerof((elm)->field.le_prev, struct type, field.le_next))

#define	LIST_REMOVE(elm, field) do {					\
	QMD_SAVELINK(oldnext, (elm)->field.le_next);			\
	QMD_SAVELINK(oldprev, (elm)->field.le_prev);			\
	QMD_LIST_CHECK_NEXT(elm, field);				\
	QMD_LIST_CHECK_PREV(elm, field);				\
	if (LIST_NEXT((elm), field) != NULL)				\
		LIST_NEXT((elm), field)->field.le_prev = 		\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = LIST_NEXT((elm), field);		\
	TRASHIT(*oldnext);						\
	TRASHIT(*oldprev);						\
} while (0)

#define LIST_SWAP(head1, head2, type, field) do {			\
	struct type *swap_tmp = LIST_FIRST((head1));			\
	LIST_FIRST((head1)) = LIST_FIRST((head2));			\
	LIST_FIRST((head2)) = swap_tmp;					\
	if ((swap_tmp = LIST_FIRST((head1))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head1));		\
	if ((swap_tmp = LIST_FIRST((head2))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head2));		\
} while (0)
/*
 * Tail queue declarations.
 *
 * Doubly-linked queue with O(1) insert/remove at both ends and before or
 * after any element.  TRACEBUF (defined above) expands to trace fields
 * only under QUEUE_MACRO_DEBUG; otherwise it is empty.
 */
#define	TAILQ_HEAD(name, type)						\
struct name {								\
	struct type *tqh_first;	/* first element */			\
	struct type **tqh_last;	/* addr of last next element */		\
	TRACEBUF							\
}

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	TAILQ_ENTRY(type)						\
struct {								\
	struct type *tqe_next;	/* next element */			\
	struct type **tqe_prev;	/* address of previous next element */	\
	TRACEBUF							\
}
/*
 * Tail queue functions.
 */

/*
 * In-kernel INVARIANTS builds verify link integrity before mutating the
 * queue; userland builds compile the checks away to nothing.
 */
#if (defined(_KERNEL) && defined(INVARIANTS))
#define	QMD_TAILQ_CHECK_HEAD(head, field) do {				\
	if (!TAILQ_EMPTY(head) &&					\
	    TAILQ_FIRST((head))->field.tqe_prev !=			\
	     &TAILQ_FIRST((head)))					\
		panic("Bad tailq head %p first->prev != head", (head));	\
} while (0)

#define	QMD_TAILQ_CHECK_TAIL(head, field) do {				\
	if (*(head)->tqh_last != NULL)					\
		panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); 	\
} while (0)

#define	QMD_TAILQ_CHECK_NEXT(elm, field) do {				\
	if (TAILQ_NEXT((elm), field) != NULL &&				\
	    TAILQ_NEXT((elm), field)->field.tqe_prev !=			\
	     &((elm)->field.tqe_next))					\
		panic("Bad link elm %p next->prev != elm", (elm));	\
} while (0)

#define	QMD_TAILQ_CHECK_PREV(elm, field) do {				\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("Bad link elm %p prev->next != elm", (elm));	\
} while (0)
#else
#define	QMD_TAILQ_CHECK_HEAD(head, field)
#define	QMD_TAILQ_CHECK_TAIL(head, headname)
#define	QMD_TAILQ_CHECK_NEXT(elm, field)
#define	QMD_TAILQ_CHECK_PREV(elm, field)
#endif /* (_KERNEL && INVARIANTS) */

#define	TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
		QMD_TRACE_HEAD(head1);					\
		QMD_TRACE_HEAD(head2);					\
	}								\
} while (0)

#define	TAILQ_EMPTY(head)	((head)->tqh_first == NULL)

#define	TAILQ_FIRST(head)	((head)->tqh_first)

#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST((head));				\
	    (var);							\
	    (var) = TAILQ_NEXT((var), field))

/* Safe variants: `var` may be removed from the queue inside the loop body. */
#define	TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var);							\
	    (var) = TAILQ_PREV((var), headname, field))

#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))

#define	TAILQ_INIT(head) do {						\
	TAILQ_FIRST((head)) = NULL;					\
	(head)->tqh_last = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
} while (0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QMD_TAILQ_CHECK_NEXT(listelm, field);				\
	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
		TAILQ_NEXT((elm), field)->field.tqe_prev = 		\
		    &TAILQ_NEXT((elm), field);				\
	else {								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
		QMD_TRACE_HEAD(head);					\
	}								\
	TAILQ_NEXT((listelm), field) = (elm);				\
	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QMD_TAILQ_CHECK_PREV(listelm, field);				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	TAILQ_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QMD_TAILQ_CHECK_HEAD(head, field);				\
	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	\
		TAILQ_FIRST((head))->field.tqe_prev =			\
		    &TAILQ_NEXT((elm), field);				\
	else								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
	TAILQ_FIRST((head)) = (elm);					\
	(elm)->field.tqe_prev = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QMD_TAILQ_CHECK_TAIL(head, field);				\
	TAILQ_NEXT((elm), field) = NULL;				\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &TAILQ_NEXT((elm), field);			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

#define	TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))

#define	TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)

#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))

#define	TAILQ_REMOVE(head, elm, field) do {				\
	QMD_SAVELINK(oldnext, (elm)->field.tqe_next);			\
	QMD_SAVELINK(oldprev, (elm)->field.tqe_prev);			\
	QMD_TAILQ_CHECK_NEXT(elm, field);				\
	QMD_TAILQ_CHECK_PREV(elm, field);				\
	if ((TAILQ_NEXT((elm), field)) != NULL)				\
		TAILQ_NEXT((elm), field)->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else {								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
		QMD_TRACE_HEAD(head);					\
	}								\
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
	TRASHIT(*oldnext);						\
	TRASHIT(*oldprev);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

#define TAILQ_SWAP(head1, head2, type, field) do {			\
	struct type *swap_first = (head1)->tqh_first;			\
	struct type **swap_last = (head1)->tqh_last;			\
	(head1)->tqh_first = (head2)->tqh_first;			\
	(head1)->tqh_last = (head2)->tqh_last;				\
	(head2)->tqh_first = swap_first;				\
	(head2)->tqh_last = swap_last;					\
	if ((swap_first = (head1)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head1)->tqh_first;	\
	else								\
		(head1)->tqh_last = &(head1)->tqh_first;		\
	if ((swap_first = (head2)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head2)->tqh_first;	\
	else								\
		(head2)->tqh_last = &(head2)->tqh_first;		\
} while (0)
#endif /* !_SYS_QUEUE_H_ */

View file

@ -72,6 +72,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->config))) { enif_inspect_binary(env, argv[1], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
}, },
{ // work { // work
@ -194,6 +195,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) { enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -222,6 +224,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) { enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -252,6 +255,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[3], &args->config))) { enif_inspect_binary(env, argv[3], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[3]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -280,6 +284,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) { enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -306,6 +311,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->config))) { enif_inspect_binary(env, argv[1], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -334,6 +340,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) { enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -364,6 +371,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) { enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -392,6 +400,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->config))) { enif_inspect_binary(env, argv[2], &args->config))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -420,6 +429,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->key))) { enif_inspect_binary(env, argv[2], &args->key))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -460,6 +470,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->key))) { enif_inspect_binary(env, argv[2], &args->key))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -515,6 +526,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[3], &args->value))) { enif_inspect_binary(env, argv[3], &args->value))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[3]);
enif_keep_resource((void*)args->session_handle); enif_keep_resource((void*)args->session_handle);
}, },
{ // work { // work
@ -812,6 +825,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->key))) { enif_inspect_binary(env, argv[1], &args->key))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_keep_resource((void*)args->cursor_handle); enif_keep_resource((void*)args->cursor_handle);
}, },
{ // work { // work
@ -845,6 +859,7 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[1], &args->key))) { enif_inspect_binary(env, argv[1], &args->key))) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_keep_resource((void*)args->cursor_handle); enif_keep_resource((void*)args->cursor_handle);
}, },
{ // work { // work
@ -906,6 +921,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->value)) { enif_inspect_binary(env, argv[2], &args->value)) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->cursor_handle); enif_keep_resource((void*)args->cursor_handle);
}, },
{ // work { // work
@ -944,6 +961,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->value)) { enif_inspect_binary(env, argv[2], &args->value)) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->cursor_handle); enif_keep_resource((void*)args->cursor_handle);
}, },
{ // work { // work
@ -982,6 +1001,8 @@ ASYNC_NIF_DECL(
enif_inspect_binary(env, argv[2], &args->value)) { enif_inspect_binary(env, argv[2], &args->value)) {
ASYNC_NIF_RETURN_BADARG(); ASYNC_NIF_RETURN_BADARG();
} }
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[1]);
enif_make_copy(ASYNC_NIF_WORK_ENV, argv[2]);
enif_keep_resource((void*)args->cursor_handle); enif_keep_resource((void*)args->cursor_handle);
}, },
{ // work { // work

View file

@ -25,11 +25,11 @@
begin begin
NIFRef = erlang:make_ref(), NIFRef = erlang:make_ref(),
case erlang:apply(Fun, [NIFRef|Args]) of case erlang:apply(Fun, [NIFRef|Args]) of
{ok, QDepth} -> {ok, {enqueued, QDepth}} ->
erlang:bump_reductions(100 * QDepth), [erlang:bump_reductions(10 * QDepth) || is_integer(QDepth), QDepth > 100],
receive receive
{NIFRef, {error, shutdown}=Error} -> {NIFRef, {error, shutdown}=Error} ->
%% Work unit was not executed, requeue it? %% Work unit was queued, but not executed.
Error; Error;
{NIFRef, {error, _Reason}=Error} -> {NIFRef, {error, _Reason}=Error} ->
%% Work unit returned an error. %% Work unit returned an error.