Compare commits


53 commits

Author SHA1 Message Date
Gregory Burd
cc807a97d6 Add new mmap config option. 2014-01-15 10:45:04 -05:00
Gregory Burd
ea99493ea3 Compensate for LSM config API changes 2013-12-09 12:54:58 -05:00
Gregory Burd
08b2d18463 Fix checkpoint config. 2013-11-20 13:10:28 -05:00
Gregory Burd
3302ab26ed Use the develop branch for now. 2013-11-19 14:16:26 -05:00
Gregory Burd
db2daf99b2 Default logging off. 2013-11-19 14:16:01 -05:00
Gregory Burd
68d9ed942b Update to WiredTiger 1.6.6 2013-11-18 20:54:59 -05:00
Gregory Burd
36faa4e713 Forgot to remove second use of checkpoint setting. 2013-11-18 20:50:26 -05:00
Gregory Burd
e560185420 When logging is enabled, enable checkpoints, even when using LSM. 2013-11-18 20:46:22 -05:00
Gregory Burd
448c0b555c Update config to match latest available options. 2013-10-30 15:00:41 -04:00
Gregory Burd
634bcd188a Integrate new configuration options available in WiredTiger. 2013-10-30 14:50:14 -04:00
Gregory Burd
1664fdcf8c API for handlers in WiredTiger changed to include session state; update our use of the API to match that change. 2013-10-30 13:11:14 -04:00
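For context, WiredTiger's event-handler callbacks gained a WT_SESSION argument around this release. A minimal sketch of a message handler with the new shape (the name and body below are illustrative, not the backend's actual handler):

    #include <stdio.h>
    #include <wiredtiger.h>

    /* Illustrative only: the updated callback signature now carries the
       session alongside the handler itself. */
    static int
    handle_message(WT_EVENT_HANDLER *handler, WT_SESSION *session,
                   const char *message)
    {
        (void)handler;
        (void)session;  /* new: session state is available to the callback */
        fprintf(stderr, "wiredtiger: %s\n", message);
        return 0;
    }
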
Gregory Burd
95515f111c Merge pull request #11 from basho-labs/gsb-2.0-fixes
Changes related to Riak 2.0 and an issue with how statistics were gathered from the backend
2013-10-30 08:53:49 -07:00
Gregory Burd
ac2c5caeff Change a few default configs and comment out the stats gathering for now. 2013-10-30 11:50:20 -04:00
Gregory Burd
75305dae94 Minor updates. 2013-10-12 21:48:05 -04:00
Gregory Burd
7d0ad2dce1 Update the version strings and a few config values which changed names. 2013-10-02 14:42:26 -04:00
Gregory Burd
84a85bbe38 Open a *statistics* cursor when gathering statistics. 2013-10-02 14:41:36 -04:00
Gregory Burd
9d2896016b A few build automation changes/fixes. 2013-10-02 14:41:06 -04:00
Gregory Burd
17585a99b1 priv now has the schema file in it, so be more specific with what we ignore in that dir 2013-10-02 14:38:41 -04:00
Gregory Burd
942e51b753 OS/X uses ".dylib" rather than ".so" for shared libraries (because it's
special), so I've worked around that.  Also tightened up some tests so that
we're not rebuilding the libraries when not necessary.
2013-09-06 09:54:55 -04:00
Gregory Burd
c60fa22422 Retry three times, then bail out and return not found. 2013-09-04 13:12:37 -04:00
Gregory Burd
48419ce4d0 Start the penalty after queues are 25% full because a) that makes sense, and b)
that avoids some odd badarith errors when PctBusy is very small.
2013-08-21 14:19:52 -04:00
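A rough sketch of the thresholding described above (hypothetical names and scale; the actual penalty math lives in the Erlang wrapper, not in this diff). Charging nothing until a queue passes 25% occupancy also keeps the arithmetic away from near-zero values of PctBusy:

    /* Hypothetical: scale a reduction penalty only once occupancy exceeds
       25%, so the divisor never operates on a tiny busy fraction. */
    static int
    reduction_penalty(double pct_busy)   /* queue occupancy in [0.0, 1.0] */
    {
        if (pct_busy <= 0.25)
            return 0;                    /* below the threshold: no penalty */
        return (int)(2000.0 * ((pct_busy - 0.25) / 0.75));
    }
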
Gregory Burd
2ddf0da53e Use malloc/free rather than enif_alloc/enif_free so as to avoid BEAM allocator
overhead (bytes and time).  Create static references to commonly used Erlang
atoms to avoid overhead re-creating them on each request cycle.
2013-08-21 12:20:19 -04:00
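The atom-caching half of this change is the standard NIF pattern: intern terms once in the load callback and reuse them, instead of rebuilding them with enif_make_atom() on every request. A minimal self-contained sketch (the real initialization appears in async_nif_load() further down in this diff):

    #include "erl_nif.h"

    static ERL_NIF_TERM ATOM_OK;
    static ERL_NIF_TERM ATOM_ERROR;

    static int
    load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
    {
        (void)priv_data;
        (void)load_info;
        /* Intern commonly used atoms once; reuse the terms on every call. */
        ATOM_OK = enif_make_atom(env, "ok");
        ATOM_ERROR = enif_make_atom(env, "error");
        return 0;
    }
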
Gregory Burd
83c3faf74f Use malloc/free rather than enif_alloc/enif_free so as to avoid BEAM allocator
overhead (bytes and time).
2013-08-21 12:18:24 -04:00
Gregory Burd
2043e8ccc6 Because the build descends into the ext/compressors/snappy directory, the
relative paths won't find system/include, so use the absolute paths instead.
2013-08-21 12:17:18 -04:00
Gregory Burd
33c8e53ccf Update to latest release of WiredTiger. Also, make sure Snappy builds before WiredTiger. 2013-08-21 12:16:24 -04:00
Gregory Burd
1bf66ae960 Every enqueued request now includes a hint as to how much work is pending in
the lower C-code.  We use that to scale the reduction count penalty so that we
can (hopefully) signal to the Erlang scheduler enough information for it to
properly throttle work.  'eagain' should only happen when queues are full; when
it does, we have no choice but to keep the calling proc busy, retrying the
request over and over in a recursive loop, if we're going to preserve request ordering.
2013-08-21 12:15:34 -04:00
Gregory Burd
e67da86a9b Change backpressure method from EAGAIN to bump_reductions so as not to block Riak/KV vnode processes when queues backup. 2013-08-19 13:32:58 -04:00
Gregory Burd
2047104cda Remove the sleep from async_nif's EAGAIN path because it doesn't seem to have a positive effect. 2013-08-19 12:20:36 -04:00
Gregory Burd
96d43d5d17 Re-use the unchanging value of 'Args' rather than including it in every recursive call. 2013-08-02 14:22:30 -04:00
Gregory Burd
05c8c615ef I think the make_ref() needs to be within the fun()'s context to trigger the selective-receive optimization in the BEAM runtime. 2013-08-01 10:02:21 -04:00
Gregory Burd
f153509409 With some input from Jon I've managed to reduce this back into a macro rather than a fun and a macro calling a fun. He also suggested that on eagain I sleep a small amount of time so as to allow other work to catch up a bit. 2013-07-31 15:39:55 -04:00
Gregory Burd
ee904b4769 Lower the queue size to shrink potential for latency in queue. Remove the earlier idea that more queues would lead to more even worker progress; just have one queue per Erlang scheduler thread (generally, one per available CPU core). Also change the way worker threads decide when to cond_wait or migrate to other queues looking for work. 2013-07-31 15:06:28 -04:00
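The scheduler count can be read off enif_system_info(), as the load path later in this diff does. A minimal sketch of sizing the queue array that way:

    #include "erl_nif.h"

    /* One work queue per Erlang scheduler thread (generally one per core). */
    static unsigned int
    queue_count(void)
    {
        ErlNifSysInfo info;
        enif_system_info(&info, sizeof(ErlNifSysInfo));
        return (unsigned int)info.scheduler_threads;
    }
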
Gregory Burd
c9a4ab8325 Revert changes to async_nif and re-enable stats. Fixed selective recv. 2013-07-31 09:41:36 -04:00
Gregory Burd
2393257bef Really disable stats. 2013-07-30 14:34:04 -04:00
Gregory Burd
211ffd884c Ignore requests for stats for right now. 2013-07-30 14:30:04 -04:00
Gregory Burd
4418a74183 Increase the number of queues in which work can reside. Worker threads, once started, don't exit until shutdown. 2013-07-30 14:21:26 -04:00
Gregory Burd
1623d5293c Increase the max queue size. 2013-07-30 13:30:43 -04:00
Gregory Burd
56c2ac27c2 Revert to a macro-only, non-recursive on eagain method for managing requests. 2013-07-30 13:27:13 -04:00
Gregory Burd
27dba903ef The ref needs to be in scope of the receive for it to be optimized. 2013-07-30 13:20:49 -04:00
Gregory Burd
8f415df69c Merge pull request #10 from basho-labs/gsb-workers-migrate
Worker threads should check for work in other queues before exiting.
2013-07-26 17:12:15 -07:00
Gregory Burd
cce163db9f Fix potential to use uninitialized value when branching. 2013-07-26 20:08:49 -04:00
Gregory Burd
9a5defd8c9 Merge remote-tracking branch 'origin/master' into gsb-workers-migrate
Conflicts:
	c_src/async_nif.h
2013-07-26 10:31:23 -04:00
Gregory Burd
452d7694a6 Added some sanity checking of key/value sizes. Check for EAGAIN/INVAL/NOMEM when starting worker threads. Switch back to the 1.6.3 release branch of WT. 2013-07-26 10:27:21 -04:00
Gregory Burd
3627ff8690 Ensure that on EAGAIN we continue to try to spawn a worker. When workers finish with a queue have them migrate to the other queues looking for work. 2013-07-25 13:29:16 -04:00
Gregory Burd
122963133a Seems logging isn't a valid config value anymore, so remove it. 2013-07-18 13:26:53 -04:00
Gregory Burd
2a847b82d0 Forgot to remove this when I dumped the MRU. 2013-07-18 13:21:22 -04:00
Gregory Burd
2694cc1dba Remove the MRU; it wasn't really effective in most cases anyway, it complicated the logic, and it was buggy. For now the cache mutex will be hot, but eventually I hope to move the cache to a lock-free dequeue. khash.h and cas.h aren't used anymore, so they have been removed. 2013-07-18 13:14:54 -04:00
Gregory Burd
bbadc81d53 Queue depth and num workers can race, so make sure that we start at least one worker when there are none active for that queue. 2013-07-15 16:51:08 -04:00
Gregory Burd
c3d3d39c36 Remove default setting from configuration. 2013-07-15 12:36:28 -04:00
Gregory Burd
bd0323af7a Update to WiredTiger 1.6.3. Fix a condition where a mutex was unlocked twice on eagain when queues were all full. 2013-07-15 12:21:10 -04:00
Gregory Burd
420b658e27 Really fix the lower bound for session handles (session_max) to 1024 (upper bound is 8192). 2013-07-08 19:59:00 -04:00
Gregory Burd
fea52c4ec3 Change lower bound for session handles (session_max) to 1024 (upper bound is 8192). 2013-07-08 19:45:45 -04:00
Gregory Burd
ac835f7617 Reduce precision of bloom filters to something more reasonable to avoid having very large bloom filters in cache. Reduce leaf pages to a more reasonable default. Given that all data in Riak is <<Bucket, Key>>, enable prefix_compression to hopefully reduce key overhead. 2013-07-08 13:49:44 -04:00
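Roughly the kind of WiredTiger table configuration this commit tunes. The option values below are illustrative for this era of WiredTiger, not the backend's actual settings, and the bloom options apply only to "lsm:" tables:

    #include <wiredtiger.h>

    /* Illustrative: smaller bloom filters, smaller leaf pages, and prefix
       compression for keys sharing a common <<Bucket, Key>> prefix. */
    static int
    create_example_table(WT_SESSION *session)
    {
        return session->create(session, "lsm:example",
                               "prefix_compression=true,leaf_page_max=16KB,"
                               "bloom_bit_count=16,bloom_hash_count=8");
    }
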
32 changed files with 342 additions and 5979 deletions

.gitignore

@@ -7,6 +7,8 @@ c_src/*.o
c_src/bzip2-1.0.6
c_src/snappy-1.0.4
deps/
priv/
priv/wt
priv/*.so*
priv/*.dylib*
log/
*~

Makefile

@@ -52,19 +52,17 @@ endif
.PHONY: all compile doc clean test dialyzer typer shell distclean pdf \
update-deps clean-common-test-data rebuild
all: deps compile test
all: deps compile
# =============================================================================
# Rules to build the system
# =============================================================================
deps:
c_src/build_deps.sh get-deps
$(REBAR) get-deps
$(REBAR) compile
update-deps:
c_src/build_deps.sh update-deps
$(REBAR) update-deps
$(REBAR) compile

c_src/async_nif.h

@@ -34,9 +34,18 @@ extern "C" {
#define ASYNC_NIF_MAX_WORKERS 1024
#define ASYNC_NIF_MIN_WORKERS 2
#define ASYNC_NIF_WORKER_QUEUE_SIZE 100
#define ASYNC_NIF_WORKER_QUEUE_SIZE 8192
#define ASYNC_NIF_MAX_QUEUED_REQS ASYNC_NIF_WORKER_QUEUE_SIZE * ASYNC_NIF_MAX_WORKERS
/* Atoms (initialized in on_load) */
static ERL_NIF_TERM ATOM_EAGAIN;
static ERL_NIF_TERM ATOM_ENOMEM;
static ERL_NIF_TERM ATOM_ENQUEUED;
static ERL_NIF_TERM ATOM_ERROR;
static ERL_NIF_TERM ATOM_OK;
static ERL_NIF_TERM ATOM_SHUTDOWN;
struct async_nif_req_entry {
ERL_NIF_TERM ref;
ErlNifEnv *env;
@@ -53,6 +62,7 @@ struct async_nif_work_queue {
unsigned int depth;
ErlNifMutex *reqs_mutex;
ErlNifCond *reqs_cnd;
struct async_nif_work_queue *next;
STAILQ_HEAD(reqs, async_nif_req_entry) reqs;
};
@@ -103,25 +113,20 @@ struct async_nif_state {
argc -= 1; \
/* Note: !!! this assumes that the first element of priv_data is ours */ \
struct async_nif_state *async_nif = *(struct async_nif_state**)enif_priv_data(env); \
if (async_nif->shutdown) { \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "shutdown")); \
} \
if (async_nif->shutdown) \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_SHUTDOWN); \
req = async_nif_reuse_req(async_nif); \
if (!req) { \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "eagain")); \
} \
if (!req) \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_ENOMEM); \
new_env = req->env; \
DPRINTF("async_nif: calling \"%s\"", __func__); \
do pre_block while(0); \
DPRINTF("async_nif: returned from \"%s\"", __func__); \
copy_of_args = (struct decl ## _args *)enif_alloc(sizeof(struct decl ## _args)); \
copy_of_args = (struct decl ## _args *)malloc(sizeof(struct decl ## _args)); \
if (!copy_of_args) { \
fn_post_ ## decl (args); \
async_nif_recycle_req(req, async_nif); \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "enomem")); \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_ENOMEM); \
} \
memcpy(copy_of_args, args, sizeof(struct decl ## _args)); \
req->ref = enif_make_copy(new_env, argv_in[0]); \
@@ -136,9 +141,8 @@ struct async_nif_state {
if (!reply) { \
fn_post_ ## decl (args); \
async_nif_recycle_req(req, async_nif); \
enif_free(copy_of_args); \
return enif_make_tuple2(env, enif_make_atom(env, "error"), \
enif_make_atom(env, "eagain")); \
free(copy_of_args); \
return enif_make_tuple2(env, ATOM_ERROR, ATOM_EAGAIN); \
} \
return reply; \
}
@@ -146,16 +150,16 @@ struct async_nif_state {
#define ASYNC_NIF_INIT(name) \
static ErlNifMutex *name##_async_nif_coord = NULL;
#define ASYNC_NIF_LOAD(name, priv) do { \
#define ASYNC_NIF_LOAD(name, env, priv) do { \
if (!name##_async_nif_coord) \
name##_async_nif_coord = enif_mutex_create(NULL); \
name##_async_nif_coord = enif_mutex_create("nif_coord load"); \
enif_mutex_lock(name##_async_nif_coord); \
priv = async_nif_load(); \
priv = async_nif_load(env); \
enif_mutex_unlock(name##_async_nif_coord); \
} while(0);
#define ASYNC_NIF_UNLOAD(name, env, priv) do { \
if (!name##_async_nif_coord) \
name##_async_nif_coord = enif_mutex_create(NULL); \
name##_async_nif_coord = enif_mutex_create("nif_coord unload"); \
enif_mutex_lock(name##_async_nif_coord); \
async_nif_unload(env, priv); \
enif_mutex_unlock(name##_async_nif_coord); \
@@ -164,7 +168,7 @@ struct async_nif_state {
} while(0);
#define ASYNC_NIF_UPGRADE(name, env) do { \
if (!name##_async_nif_coord) \
name##_async_nif_coord = enif_mutex_create(NULL); \
name##_async_nif_coord = enif_mutex_create("nif_coord upgrade"); \
enif_mutex_lock(name##_async_nif_coord); \
async_nif_upgrade(env); \
enif_mutex_unlock(name##_async_nif_coord); \
@@ -191,15 +195,15 @@ async_nif_reuse_req(struct async_nif_state *async_nif)
enif_mutex_lock(async_nif->recycled_req_mutex);
if (STAILQ_EMPTY(&async_nif->recycled_reqs)) {
if (async_nif->num_reqs < ASYNC_NIF_MAX_QUEUED_REQS) {
req = enif_alloc(sizeof(struct async_nif_req_entry));
req = malloc(sizeof(struct async_nif_req_entry));
if (req) {
memset(req, 0, sizeof(struct async_nif_req_entry));
env = enif_alloc_env();
if (env) {
req->env = env;
async_nif->num_reqs++;
__sync_fetch_and_add(&async_nif->num_reqs, 1);
} else {
enif_free(req);
free(req);
req = NULL;
}
}
@@ -253,7 +257,7 @@ async_nif_start_worker(struct async_nif_state *async_nif, struct async_nif_work_
SLIST_REMOVE(&async_nif->we_joining, we, async_nif_worker_entry, entries);
void *exit_value = 0; /* We ignore the thread_join's exit value. */
enif_thread_join(we->tid, &exit_value);
enif_free(we);
free(we);
async_nif->we_active--;
we = n;
}
@@ -263,7 +267,7 @@ async_nif_start_worker(struct async_nif_state *async_nif, struct async_nif_work_
return EAGAIN;
}
we = enif_alloc(sizeof(struct async_nif_worker_entry));
we = malloc(sizeof(struct async_nif_worker_entry));
if (!we) {
enif_mutex_unlock(async_nif->we_mutex);
return ENOMEM;
@@ -287,9 +291,9 @@ static ERL_NIF_TERM
async_nif_enqueue_req(struct async_nif_state* async_nif, struct async_nif_req_entry *req, int hint)
{
/* Identify the most appropriate worker for this request. */
unsigned int i, qid = 0;
unsigned int i, last_qid, qid = 0;
struct async_nif_work_queue *q = NULL;
double avg_depth;
double avg_depth = 0.0;
/* Either we're choosing a queue based on some affinity/hinted value or we
need to select the next queue in the rotation and atomically update that
@@ -297,9 +301,10 @@ async_nif_enqueue_req(struct async_nif_state* async_nif, struct async_nif_req_en
if (hint >= 0) {
qid = (unsigned int)hint;
} else {
qid = async_nif->next_q;
qid = (qid + 1) % async_nif->num_queues;
async_nif->next_q = qid;
do {
last_qid = __sync_fetch_and_add(&async_nif->next_q, 0);
qid = (last_qid + 1) % async_nif->num_queues;
} while (!__sync_bool_compare_and_swap(&async_nif->next_q, last_qid, qid));
}
/* Now we inspect and iterate across the set of queues trying to select one
@@ -314,18 +319,13 @@ async_nif_enqueue_req(struct async_nif_state* async_nif, struct async_nif_req_en
avg_depth += async_nif->queues[j].depth;
}
}
if (avg_depth != 0)
avg_depth /= n;
if (avg_depth) avg_depth /= n;
/* Lock this queue under consideration, then check for shutdown. While
we hold this lock either a) we're shutting down so exit now or b) this
queue will be valid until we release the lock. */
q = &async_nif->queues[qid];
enif_mutex_lock(q->reqs_mutex);
if (async_nif->shutdown) {
enif_mutex_unlock(q->reqs_mutex);
return 0;
}
/* Try not to enqueue a request into a queue that isn't keeping up with
the request volume. */
@@ -343,18 +343,25 @@ async_nif_enqueue_req(struct async_nif_state* async_nif, struct async_nif_req_en
/* Add the request to the queue. */
STAILQ_INSERT_TAIL(&q->reqs, req, entries);
q->depth++;
__sync_fetch_and_add(&q->depth, 1);
/* We've selected a queue for this new request; now check to make sure there are
enough workers actively processing requests on this queue. */
if (q->depth > q->num_workers || q->num_workers == 0)
if (async_nif_start_worker(async_nif, q) == 0) q->num_workers++;
while (q->depth > q->num_workers) {
switch(async_nif_start_worker(async_nif, q)) {
case EINVAL: case ENOMEM: default: return 0;
case EAGAIN: continue;
case 0: __sync_fetch_and_add(&q->num_workers, 1); goto done;
}
}done:;
/* Build the term before releasing the lock so as not to race on the use of
the req pointer (which will soon become invalid in another thread
performing the request). */
ERL_NIF_TERM reply = enif_make_tuple2(req->env, enif_make_atom(req->env, "ok"),
enif_make_atom(req->env, "enqueued"));
double pct_full = (double)avg_depth / (double)ASYNC_NIF_WORKER_QUEUE_SIZE;
ERL_NIF_TERM reply = enif_make_tuple2(req->env, ATOM_OK,
enif_make_tuple2(req->env, ATOM_ENQUEUED,
enif_make_double(req->env, pct_full)));
enif_cond_signal(q->reqs_cnd);
enif_mutex_unlock(q->reqs_mutex);
return reply;
@@ -373,6 +380,7 @@ async_nif_worker_fn(void *arg)
struct async_nif_state *async_nif = we->async_nif;
struct async_nif_work_queue *q = we->q;
struct async_nif_req_entry *req = NULL;
unsigned int tries = async_nif->num_queues;
for(;;) {
/* Examine the request queue, are there things to be done? */
@@ -384,22 +392,33 @@ async_nif_worker_fn(void *arg)
}
if (STAILQ_EMPTY(&q->reqs)) {
/* Queue is empty so we wait for more work to arrive. */
if (q->num_workers > ASYNC_NIF_MIN_WORKERS) {
enif_mutex_unlock(q->reqs_mutex);
break;
} else {
enif_cond_wait(q->reqs_cnd, q->reqs_mutex);
goto check_again_for_work;
}
enif_mutex_unlock(q->reqs_mutex);
if (tries == 0 && q == we->q) {
if (q->num_workers > ASYNC_NIF_MIN_WORKERS) {
/* At this point we've tried to find/execute work on all queues
* and there are at least MIN_WORKERS on this queue, so we
* leave this loop (break), which leads to a thread exit/join. */
break;
} else {
enif_mutex_lock(q->reqs_mutex);
enif_cond_wait(q->reqs_cnd, q->reqs_mutex);
goto check_again_for_work;
}
} else {
tries--;
__sync_fetch_and_add(&q->num_workers, -1);
q = q->next;
__sync_fetch_and_add(&q->num_workers, 1);
continue; // try next queue
}
} else {
/* At this point the next req is ours to process and we hold the
reqs_mutex lock. Take the request off the queue. */
req = STAILQ_FIRST(&q->reqs);
STAILQ_REMOVE(&q->reqs, req, async_nif_req_entry, entries);
q->depth--;
__sync_fetch_and_add(&q->depth, -1);
/* Ensure that there is at least one other worker thread watching this
queue. */
/* Wake up another worker thread watching this queue to help process work. */
enif_cond_signal(q->reqs_cnd);
enif_mutex_unlock(q->reqs_mutex);
@@ -413,7 +432,7 @@ async_nif_worker_fn(void *arg)
req->ref = 0;
req->fn_work = 0;
req->fn_post = 0;
enif_free(req->args);
free(req->args);
req->args = NULL;
async_nif_recycle_req(req, async_nif);
req = NULL;
@@ -422,7 +441,7 @@ async_nif_worker_fn(void *arg)
enif_mutex_lock(async_nif->we_mutex);
SLIST_INSERT_HEAD(&async_nif->we_joining, we, entries);
enif_mutex_unlock(async_nif->we_mutex);
q->num_workers--;
__sync_fetch_and_add(&q->num_workers, -1);
enif_thread_exit(0);
return 0;
}
@@ -465,7 +484,7 @@ async_nif_unload(ErlNifEnv *env, struct async_nif_state *async_nif)
SLIST_REMOVE(&async_nif->we_joining, we, async_nif_worker_entry, entries);
void *exit_value = 0; /* We ignore the thread_join's exit value. */
enif_thread_join(we->tid, &exit_value);
enif_free(we);
free(we);
async_nif->we_active--;
we = n;
}
@@ -484,12 +503,11 @@ async_nif_unload(ErlNifEnv *env, struct async_nif_state *async_nif)
struct async_nif_req_entry *n = STAILQ_NEXT(req, entries);
enif_clear_env(req->env);
enif_send(NULL, &req->pid, req->env,
enif_make_tuple2(req->env, enif_make_atom(req->env, "error"),
enif_make_atom(req->env, "shutdown")));
enif_make_tuple2(req->env, ATOM_ERROR, ATOM_SHUTDOWN));
req->fn_post(req->args);
enif_free_env(req->env);
enif_free(req->args);
enif_free(req);
free(req->args);
free(req);
req = n;
}
enif_mutex_destroy(q->reqs_mutex);
@@ -503,18 +521,18 @@ async_nif_unload(ErlNifEnv *env, struct async_nif_state *async_nif)
while(req != NULL) {
struct async_nif_req_entry *n = STAILQ_NEXT(req, entries);
enif_free_env(req->env);
enif_free(req);
free(req);
req = n;
}
enif_mutex_unlock(async_nif->recycled_req_mutex);
enif_mutex_destroy(async_nif->recycled_req_mutex);
memset(async_nif, 0, sizeof(struct async_nif_state) + (sizeof(struct async_nif_work_queue) * async_nif->num_queues));
enif_free(async_nif);
free(async_nif);
}
static void *
async_nif_load()
async_nif_load(ErlNifEnv *env)
{
static int has_init = 0;
unsigned int i, num_queues;
@@ -525,6 +543,14 @@ async_nif_load()
if (has_init) return 0;
else has_init = 1;
/* Init some static references to commonly used atoms. */
ATOM_EAGAIN = enif_make_atom(env, "eagain");
ATOM_ENOMEM = enif_make_atom(env, "enomem");
ATOM_ENQUEUED = enif_make_atom(env, "enqueued");
ATOM_ERROR = enif_make_atom(env, "error");
ATOM_OK = enif_make_atom(env, "ok");
ATOM_SHUTDOWN = enif_make_atom(env, "shutdown");
/* Find out how many schedulers there are. */
enif_system_info(&info, sizeof(ErlNifSysInfo));
@@ -542,8 +568,8 @@ async_nif_load()
}
/* Init our portion of priv_data's module-specific state. */
async_nif = enif_alloc(sizeof(struct async_nif_state) +
sizeof(struct async_nif_work_queue) * num_queues);
async_nif = malloc(sizeof(struct async_nif_state) +
sizeof(struct async_nif_work_queue) * num_queues);
if (!async_nif)
return NULL;
memset(async_nif, 0, sizeof(struct async_nif_state) +
@@ -554,15 +580,16 @@ async_nif_load()
async_nif->next_q = 0;
async_nif->shutdown = 0;
STAILQ_INIT(&async_nif->recycled_reqs);
async_nif->recycled_req_mutex = enif_mutex_create(NULL);
async_nif->we_mutex = enif_mutex_create(NULL);
async_nif->recycled_req_mutex = enif_mutex_create("recycled_req");
async_nif->we_mutex = enif_mutex_create("we");
SLIST_INIT(&async_nif->we_joining);
for (i = 0; i < async_nif->num_queues; i++) {
struct async_nif_work_queue *q = &async_nif->queues[i];
STAILQ_INIT(&q->reqs);
q->reqs_mutex = enif_mutex_create(NULL);
q->reqs_cnd = enif_cond_create(NULL);
q->reqs_mutex = enif_mutex_create("reqs");
q->reqs_cnd = enif_cond_create("reqs");
q->next = &async_nif->queues[(i + 1) % num_queues];
}
return async_nif;
}
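
The lock-free pieces above lean on two GCC builtins. A standalone sketch of the same compare-and-swap rotation used for next_q (illustrative names):

    #include <stdio.h>

    static unsigned int next_q = 0;

    /* Read the counter atomically, compute its successor modulo num_queues,
       and publish it only if no other thread advanced it in between. */
    static unsigned int
    advance_queue(unsigned int num_queues)
    {
        unsigned int last, next;
        do {
            last = __sync_fetch_and_add(&next_q, 0);   /* atomic read */
            next = (last + 1) % num_queues;
        } while (!__sync_bool_compare_and_swap(&next_q, last, next));
        return next;
    }

    int
    main(void)
    {
        printf("selected queue %u\n", advance_queue(4));
        return 0;
    }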

atomic.h (deleted)

@@ -1,100 +0,0 @@
/*
* File:
* atomic.h
* Author(s):
* Pascal Felber <pascal.felber@unine.ch>
* Patrick Marlier <patrick.marlier@unine.ch>
* Description:
* Atomic operations.
*
* Copyright (c) 2007-2012.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2
* of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This program has a dual license and can also be distributed
* under the terms of the MIT license.
*/
#ifndef _ATOMIC_H_
# define _ATOMIC_H_
# ifdef ATOMIC_BUILTIN
typedef volatile size_t atomic_t;
# ifdef __INTEL_COMPILER
# define ATOMIC_CB __memory_barrier()
# else /* ! __INTEL_COMPILER, assuming __GNUC__ */
# define ATOMIC_CB __asm__ __volatile__("": : :"memory")
# endif /* ! __INTEL_COMPILER */
# ifndef UNSAFE
# warning "This is experimental and shouldn't be used"
/*
Note: __sync_ is available for GCC 4.2+ and ICC 11.1+
But these definitions are not 100% safe:
* need 'a' to be volatile
* no fence for read/store proposed (only full fence)
C11 and C++11 also propose atomic operations.
*/
# define ATOMIC_CAS_FULL(a, e, v) (__sync_bool_compare_and_swap(a, e, v))
# define ATOMIC_FETCH_INC_FULL(a) (__sync_fetch_and_add(a, 1))
# define ATOMIC_FETCH_DEC_FULL(a) (__sync_fetch_and_add(a, -1))
# define ATOMIC_FETCH_ADD_FULL(a, v) (__sync_fetch_and_add(a, v))
# define ATOMIC_LOAD_ACQ(a) (*(a))
# define ATOMIC_LOAD(a) (*(a))
# define ATOMIC_STORE_REL(a, v) (*(a) = (v))
# define ATOMIC_STORE(a, v) (*(a) = (v))
# define ATOMIC_MB_READ /* Nothing */
# define ATOMIC_MB_WRITE /* Nothing */
# define ATOMIC_MB_FULL __sync_synchronize()
# else
/* Use only for testing purposes (single thread benchmarks) */
# define ATOMIC_CAS_FULL(a, e, v) (*(a) = (v), 1)
# define ATOMIC_FETCH_INC_FULL(a) ((*(a))++)
# define ATOMIC_FETCH_DEC_FULL(a) ((*(a))--)
# define ATOMIC_FETCH_ADD_FULL(a, v) ((*(a)) += (v))
# define ATOMIC_LOAD_ACQ(a) (*(a))
# define ATOMIC_LOAD(a) (*(a))
# define ATOMIC_STORE_REL(a, v) (*(a) = (v))
# define ATOMIC_STORE(a, v) (*(a) = (v))
# define ATOMIC_MB_READ /* Nothing */
# define ATOMIC_MB_WRITE /* Nothing */
# define ATOMIC_MB_FULL /* Nothing */
# endif /* UNSAFE */
# else /* ! ATOMIC_BUILTIN */
/* NOTE: enable fence instructions for i386 and amd64, but the mfence instruction seems costly. */
/* # define AO_USE_PENTIUM4_INSTRS */
# include "atomic_ops/atomic_ops.h"
typedef AO_t atomic_t;
# define ATOMIC_CB AO_compiler_barrier()
# define ATOMIC_CAS_FULL(a, e, v) (AO_compare_and_swap_full((volatile AO_t *)(a), (AO_t)(e), (AO_t)(v)))
# define ATOMIC_FETCH_INC_FULL(a) (AO_fetch_and_add1_full((volatile AO_t *)(a)))
# define ATOMIC_FETCH_DEC_FULL(a) (AO_fetch_and_sub1_full((volatile AO_t *)(a)))
# define ATOMIC_FETCH_ADD_FULL(a, v) (AO_fetch_and_add_full((volatile AO_t *)(a), (AO_t)(v)))
# ifdef SAFE
# define ATOMIC_LOAD_ACQ(a) (AO_load_full((volatile AO_t *)(a)))
# define ATOMIC_LOAD(a) (AO_load_full((volatile AO_t *)(a)))
# define ATOMIC_STORE_REL(a, v) (AO_store_full((volatile AO_t *)(a), (AO_t)(v)))
# define ATOMIC_STORE(a, v) (AO_store_full((volatile AO_t *)(a), (AO_t)(v)))
# define ATOMIC_MB_READ AO_nop_full()
# define ATOMIC_MB_WRITE AO_nop_full()
# define ATOMIC_MB_FULL AO_nop_full()
# else /* ! SAFE */
# define ATOMIC_LOAD_ACQ(a) (AO_load_acquire_read((volatile AO_t *)(a)))
# define ATOMIC_LOAD(a) (*((volatile AO_t *)(a)))
# define ATOMIC_STORE_REL(a, v) (AO_store_release((volatile AO_t *)(a), (AO_t)(v)))
# define ATOMIC_STORE(a, v) (*((volatile AO_t *)(a)) = (AO_t)(v))
# define ATOMIC_MB_READ AO_nop_read()
# define ATOMIC_MB_WRITE AO_nop_write()
# define ATOMIC_MB_FULL AO_nop_full()
# endif /* ! SAFE */
# endif /* ! NO_AO */
#endif /* _ATOMIC_H_ */

(deleted file)

@@ -1,4 +0,0 @@
Originally written by Hans Boehm, with some platform-dependent code
imported from the Boehm-Demers-Weiser GC, where it was contributed
by many others.

(deleted license file)

@@ -1,340 +0,0 @@
(Deleted: the full verbatim text of the GNU General Public License, version 2, June 1991.)

(deleted file)

@@ -1,2 +0,0 @@
This directory contains a stripped-down (gcc-only) version of libatomic_ops by Hans Boehm.
The official release is available from http://www.hpl.hp.com/research/linux/atomic_ops/.

(deleted file)

@@ -1,46 +0,0 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Definitions for architectures on which loads and stores of AO_t are
* atomic for all legal alignments.
*/
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
{
assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
/* Cast away the volatile for architectures where volatile adds barrier
semantics. */
return *(AO_t *)addr;
}
#define AO_HAVE_load
AO_INLINE void
AO_store(volatile AO_t *addr, AO_t new_val)
{
assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
(*(AO_t *)addr) = new_val;
}
#define AO_HAVE_store

(deleted file)

@@ -1,168 +0,0 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Describes architectures on which volatile AO_t, unsigned char, unsigned
* short, and unsigned int loads and stores have acquire/release semantics for
* all normally legal alignments.
*/
//#include "acquire_release_volatile.h"
//#include "char_acquire_release_volatile.h"
//#include "short_acquire_release_volatile.h"
//#include "int_acquire_release_volatile.h"
/*
* This file adds definitions appropriate for environments in which an AO_t
* volatile load has acquire semantics, and an AO_t volatile store has release
* semantics. This is arguably supposed to be true with the standard Itanium
* software conventions.
*/
/*
* Empirically gcc/ia64 does some reordering of ordinary operations around volatiles
* even when we think it shouldn't. Gcc 3.3 and earlier could reorder a volatile store
* with another store. As of March 2005, gcc pre-4 reused previously computed
* common subexpressions across a volatile load.
* Hence we now add compiler barriers for gcc.
*/
#if !defined(AO_GCC_BARRIER)
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER()
# endif
#endif
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *p)
{
AO_t result = *p;
/* A normal volatile load generates an ld.acq */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_load_acquire
AO_INLINE void
AO_store_release(volatile AO_t *p, AO_t val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel */
*p = val;
}
#define AO_HAVE_store_release
/*
* This file adds definitions appropriate for environments in which an unsigned char
* volatile load has acquire semantics, and an unsigned char volatile store has release
* semantics. This is true with the standard Itanium ABI.
*/
#if !defined(AO_GCC_BARRIER)
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER()
# endif
#endif
AO_INLINE unsigned char
AO_char_load_acquire(const volatile unsigned char *p)
{
unsigned char result = *p;
/* A normal volatile load generates an ld.acq */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_char_load_acquire
AO_INLINE void
AO_char_store_release(volatile unsigned char *p, unsigned char val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel */
*p = val;
}
#define AO_HAVE_char_store_release
/*
* This file adds definitions appropriate for environments in which an unsigned short
* volatile load has acquire semantics, and an unsigned short volatile store has release
* semantics. This is true with the standard Itanium ABI.
*/
#if !defined(AO_GCC_BARRIER)
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER()
# endif
#endif
AO_INLINE unsigned short
AO_short_load_acquire(const volatile unsigned short *p)
{
unsigned short result = *p;
/* A normal volatile load generates an ld.acq */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_short_load_acquire
AO_INLINE void
AO_short_store_release(volatile unsigned short *p, unsigned short val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel */
*p = val;
}
#define AO_HAVE_short_store_release
/*
* This file adds definitions appropriate for environments in which an unsigned
* int volatile load has acquire semantics, and an unsigned int volatile
* store has release semantics. This is true with the standard Itanium ABI.
*/
#if !defined(AO_GCC_BARRIER)
# if defined(__GNUC__)
# define AO_GCC_BARRIER() AO_compiler_barrier()
# else
# define AO_GCC_BARRIER()
# endif
#endif
AO_INLINE unsigned int
AO_int_load_acquire(const volatile unsigned int *p)
{
unsigned int result = *p;
/* A normal volatile load generates an ld.acq */
AO_GCC_BARRIER();
return result;
}
#define AO_HAVE_int_load_acquire
AO_INLINE void
AO_int_store_release(volatile unsigned int *p, unsigned int val)
{
AO_GCC_BARRIER();
/* A normal volatile store generates an st.rel */
*p = val;
}
#define AO_HAVE_int_store_release

(deleted file)

@@ -1,126 +0,0 @@
/*
* Copyright (c) 2003-2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Inclusion of this file signifies that AO_t is in fact int. Hence
* any AO_... operations can also serve as AO_int_... operations.
* We currently define only the more important ones here, and allow for
* the normal generalization process to define the others.
* We should probably add others in the future.
*/
#if defined(AO_HAVE_compare_and_swap_full) && \
!defined(AO_HAVE_int_compare_and_swap_full)
# define AO_int_compare_and_swap_full(addr, old, new_val) \
AO_compare_and_swap_full((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_full
# endif
#if defined(AO_HAVE_compare_and_swap_acquire) && \
!defined(AO_HAVE_int_compare_and_swap_acquire)
# define AO_int_compare_and_swap_acquire(addr, old, new_val) \
AO_compare_and_swap_acquire((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_acquire
# endif
#if defined(AO_HAVE_compare_and_swap_release) && \
!defined(AO_HAVE_int_compare_and_swap_release)
# define AO_int_compare_and_swap_release(addr, old, new_val) \
AO_compare_and_swap_release((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_release
# endif
#if defined(AO_HAVE_compare_and_swap_write) && \
!defined(AO_HAVE_int_compare_and_swap_write)
# define AO_int_compare_and_swap_write(addr, old, new_val) \
AO_compare_and_swap_write((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_write
# endif
#if defined(AO_HAVE_compare_and_swap_read) && \
!defined(AO_HAVE_int_compare_and_swap_read)
# define AO_int_compare_and_swap_read(addr, old, new_val) \
AO_compare_and_swap_read((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap_read
# endif
#if defined(AO_HAVE_compare_and_swap) && \
!defined(AO_HAVE_int_compare_and_swap)
# define AO_int_compare_and_swap(addr, old, new_val) \
AO_compare_and_swap((volatile AO_t *)(addr), \
(AO_t)(old), (AO_t)(new_val))
# define AO_HAVE_int_compare_and_swap
# endif
#if defined(AO_HAVE_load_acquire) && \
!defined(AO_HAVE_int_load_acquire)
# define AO_int_load_acquire(addr) \
(int)AO_load_acquire((const volatile AO_t *)(addr))
# define AO_HAVE_int_load_acquire
# endif
#if defined(AO_HAVE_store_release) && \
!defined(AO_HAVE_int_store_release)
# define AO_int_store_release(addr, val) \
AO_store_release((volatile AO_t *)(addr), (AO_t)(val))
# define AO_HAVE_int_store_release
# endif
#if defined(AO_HAVE_fetch_and_add_full) && \
!defined(AO_HAVE_int_fetch_and_add_full)
# define AO_int_fetch_and_add_full(addr, incr) \
(int)AO_fetch_and_add_full((volatile AO_t *)(addr), (AO_t)(incr))
# define AO_HAVE_int_fetch_and_add_full
# endif
#if defined(AO_HAVE_fetch_and_add1_acquire) && \
!defined(AO_HAVE_int_fetch_and_add1_acquire)
# define AO_int_fetch_and_add1_acquire(addr) \
(int)AO_fetch_and_add1_acquire((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_acquire
# endif
#if defined(AO_HAVE_fetch_and_add1_release) && \
!defined(AO_HAVE_int_fetch_and_add1_release)
# define AO_int_fetch_and_add1_release(addr) \
(int)AO_fetch_and_add1_release((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_add1_release
# endif
#if defined(AO_HAVE_fetch_and_sub1_acquire) && \
!defined(AO_HAVE_int_fetch_and_sub1_acquire)
# define AO_int_fetch_and_sub1_acquire(addr) \
(int)AO_fetch_and_sub1_acquire((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_acquire
# endif
#if defined(AO_HAVE_fetch_and_sub1_release) && \
!defined(AO_HAVE_int_fetch_and_sub1_release)
# define AO_int_fetch_and_sub1_release(addr) \
(int)AO_fetch_and_sub1_release((volatile AO_t *)(addr))
# define AO_HAVE_int_fetch_and_sub1_release
# endif

atomic_ops.h (deleted)

@@ -1,348 +0,0 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ATOMIC_OPS_H
#define ATOMIC_OPS_H
#include <assert.h>
#include <stddef.h>
/* We define various atomic operations on memory in a */
/* machine-specific way. Unfortunately, this is complicated */
/* by the fact that these may or may not be combined with */
/* various memory barriers. Thus the actual operations we */
/* define have the form AO_<atomic-op>_<barrier>, for all */
/* plausible combinations of <atomic-op> and <barrier>. */
/* This of course results in a mild combinatorial explosion. */
/* To deal with it, we try to generate derived */
/* definitions for as many of the combinations as we can, as */
/* automatically as possible. */
/* */
/* Our assumption throughout is that the programmer will */
/* specify the least demanding operation and memory barrier */
/* that will guarantee correctness for the implementation. */
/* Our job is to find the least expensive way to implement it */
/* on the applicable hardware. In many cases that will */
/* involve, for example, a stronger memory barrier, or a */
/* combination of hardware primitives. */
/* */
/* Conventions: */
/* "plain" atomic operations are not guaranteed to include */
/* a barrier. The suffix in the name specifies the barrier */
/* type. Suffixes are: */
/* _release: Earlier operations may not be delayed past it. */
/* _acquire: Later operations may not move ahead of it. */
/* _read: Subsequent reads must follow this operation and */
/* preceding reads. */
/* _write: Earlier writes precede both this operation and */
/* later writes. */
/* _full: Ordered with respect to both earlier and later memops.*/
/* _release_write: Ordered with respect to earlier writes. */
/* _acquire_read: Ordered with respect to later reads. */
/* */
/* Currently we try to define the following atomic memory */
/* operations, in combination with the above barriers: */
/* AO_nop */
/* AO_load */
/* AO_store */
/* AO_test_and_set (binary) */
/* AO_fetch_and_add */
/* AO_fetch_and_add1 */
/* AO_fetch_and_sub1 */
/* AO_or */
/* AO_compare_and_swap */
/* */
/* Note that atomicity guarantees are valid only if both */
/* readers and writers use AO_ operations to access the */
/* shared value, while ordering constraints are intended to */
/* apply to all memory operations. If a location can potentially */
/* be accessed simultaneously from multiple threads, and one of */
/* those accesses may be a write access, then all such */
/* accesses to that location should be through AO_ primitives. */
/* However if AO_ operations enforce sufficient ordering to */
/* ensure that a location x cannot be accessed concurrently, */
/* or can only be read concurrently, then x can be accessed */
/* via ordinary references and assignments. */
/* */
/* Compare_and_exchange takes an address and an expected old */
/* value and a new value, and returns an int. Nonzero */
/* indicates that it succeeded. */
/* Test_and_set takes an address, atomically replaces it by */
/* AO_TS_SET, and returns the prior value. */
/* An AO_TS_t location can be reset with the */
/* AO_CLEAR macro, which normally uses AO_store_release. */
/* AO_fetch_and_add takes an address and an AO_t increment */
/* value. The AO_fetch_and_add1 and AO_fetch_and_sub1 variants */
/* are provided, since they allow faster implementations on */
/* some hardware. AO_or atomically ors an AO_t value into a */
/* memory location, but does not provide access to the original.*/
/* */
/* We expect this list to grow slowly over time. */
/* */
/* Note that AO_nop_full is a full memory barrier. */
/* */
/* Note that if some data is initialized with */
/* data.x = ...; data.y = ...; ... */
/* AO_store_release_write(&data_is_initialized, 1) */
/* then data is guaranteed to be initialized after the test */
/* if (AO_load_acquire_read(&data_is_initialized)) ... */
/* succeeds. Furthermore, this should generate near-optimal */
/* code on all common platforms. */
/* */
/* All operations operate on unsigned AO_t, which */
/* is the natural word size, and usually unsigned long. */
/* It is possible to check whether a particular operation op */
/* is available on a particular platform by checking whether */
/* AO_HAVE_op is defined. We make heavy use of these macros */
/* internally. */
/* The rest of this file basically has three sections: */
/* */
/* Some utility and default definitions. */
/* */
/* The architecture dependent section: */
/* This defines atomic operations that have direct hardware */
/* support on a particular platform, mostly by including the */
/* appropriate compiler- and hardware-dependent file. */
/* */
/* The synthesis section: */
/* This tries to define other atomic operations in terms of */
/* those that are explicitly available on the platform. */
/* This section is hardware independent. */
/* We make no attempt to synthesize operations in ways that */
/* effectively introduce locks, except for the debugging/demo */
/* pthread-based implementation at the beginning. A more */
/* realistic implementation that falls back to locks could be */
/* added as a higher layer. But that would sacrifice */
/* usability from signal handlers. */
/* The synthesis section is implemented almost entirely in */
/* atomic_ops_generalize.h. */
/* Some common defaults. Overridden for some architectures. */
#define AO_t size_t
/* The test_and_set primitive returns an AO_TS_VAL_t value. */
/* AO_TS_t is the type of an in-memory test-and-set location. */
#define AO_TS_INITIALIZER (AO_t)AO_TS_CLEAR
/* Platform-dependent stuff: */
#if defined(__GNUC__) || defined(_MSC_VER) || defined(__INTEL_COMPILER) \
|| defined(__DMC__) || defined(__WATCOMC__)
# define AO_INLINE static __inline
#elif defined(__sun)
# define AO_INLINE static inline
#else
# define AO_INLINE static
#endif
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
# define AO_compiler_barrier() __asm__ __volatile__("" : : : "memory")
#elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
|| defined(__WATCOMC__)
# if defined(_AMD64_) || defined(_M_X64) || _MSC_VER >= 1400
# if defined(_WIN32_WCE)
/* # include <cmnintrin.h> */
# elif defined(_MSC_VER)
# include <intrin.h>
# endif
# pragma intrinsic(_ReadWriteBarrier)
# define AO_compiler_barrier() _ReadWriteBarrier()
/* We assume this does not generate a fence instruction. */
/* The documentation is a bit unclear. */
# else
# define AO_compiler_barrier() __asm { }
/* The preceding implementation may be preferable here too. */
/* But the documentation warns about VC++ 2003 and earlier. */
# endif
#elif defined(__INTEL_COMPILER)
# define AO_compiler_barrier() __memory_barrier() /* Too strong? IA64-only? */
#elif defined(_HPUX_SOURCE)
# if defined(__ia64)
# include <machine/sys/inline.h>
# define AO_compiler_barrier() _Asm_sched_fence()
# else
/* FIXME - We don't know how to do this. This is a guess. */
/* And probably a bad one. */
static volatile int AO_barrier_dummy;
# define AO_compiler_barrier() AO_barrier_dummy = AO_barrier_dummy
# endif
#else
/* We conjecture that the following usually gives us the right */
/* semantics or an error. */
# define AO_compiler_barrier() asm("")
#endif
#if defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/generic_pthread.h"
#endif /* AO_USE_PTHREAD_DEFS */
#if defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS) \
&& !defined(__INTEL_COMPILER)
# if defined(__i386__)
/* We don't define AO_USE_SYNC_CAS_BUILTIN for x86 here because */
/* it might require specifying additional options (like -march) */
/* or additional link libraries (if -march is not specified). */
# include "./x86.h"
# endif /* __i386__ */
# if defined(__x86_64__)
# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
/* It is safe to use __sync CAS built-in on this architecture. */
# define AO_USE_SYNC_CAS_BUILTIN
# endif
# include "./x86_64.h"
# endif /* __x86_64__ */
# if defined(__ia64__)
# include "./ia64.h"
# define AO_GENERALIZE_TWICE
# endif /* __ia64__ */
# if defined(__hppa__)
# include "atomic_ops/sysdeps/gcc/hppa.h"
# define AO_CAN_EMUL_CAS
# endif /* __hppa__ */
# if defined(__alpha__)
# include "atomic_ops/sysdeps/gcc/alpha.h"
# define AO_GENERALIZE_TWICE
# endif /* __alpha__ */
# if defined(__s390__)
# include "atomic_ops/sysdeps/gcc/s390.h"
# endif /* __s390__ */
# if defined(__sparc__)
# include "./sparc.h"
# define AO_CAN_EMUL_CAS
# endif /* __sparc__ */
# if defined(__m68k__)
# include "atomic_ops/sysdeps/gcc/m68k.h"
# endif /* __m68k__ */
# if defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
|| defined(__powerpc64__) || defined(__ppc64__)
# include "./powerpc.h"
# endif /* __powerpc__ */
# if defined(__arm__) && !defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/gcc/arm.h"
# define AO_CAN_EMUL_CAS
# endif /* __arm__ */
# if defined(__cris__) || defined(CRIS)
# include "atomic_ops/sysdeps/gcc/cris.h"
# endif
# if defined(__mips__)
# include "atomic_ops/sysdeps/gcc/mips.h"
# endif /* __mips__ */
# if defined(__sh__) || defined(SH4)
# include "atomic_ops/sysdeps/gcc/sh.h"
# define AO_CAN_EMUL_CAS
# endif /* __sh__ */
#endif /* __GNUC__ && !AO_USE_PTHREAD_DEFS */
#if defined(__INTEL_COMPILER) && !defined(AO_USE_PTHREAD_DEFS)
# if defined(__ia64__)
# include "./ia64.h"
# define AO_GENERALIZE_TWICE
# endif
# if defined(__GNUC__)
/* Intel Compiler in GCC compatible mode */
# if defined(__i386__)
# include "./x86.h"
# endif /* __i386__ */
# if defined(__x86_64__)
# if __INTEL_COMPILER > 1110
# define AO_USE_SYNC_CAS_BUILTIN
# endif
# include "./x86_64.h"
# endif /* __x86_64__ */
# endif
#endif
#if defined(_HPUX_SOURCE) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
# if defined(__ia64)
# include "atomic_ops/sysdeps/hpc/ia64.h"
# define AO_GENERALIZE_TWICE
# else
# include "atomic_ops/sysdeps/hpc/hppa.h"
# define AO_CAN_EMUL_CAS
# endif
#endif
#if defined(__sun) && !defined(__GNUC__) && !defined(AO_USE_PTHREAD_DEFS)
/* Note: use -DAO_USE_PTHREAD_DEFS if Sun CC does not handle inline asm. */
# if defined(__i386)
# include "atomic_ops/sysdeps/sunc/x86.h"
# endif /* __i386 */
# if defined(__x86_64) || defined(__amd64)
# include "atomic_ops/sysdeps/sunc/x86_64.h"
# endif /* __x86_64 */
#endif
#if !defined(__GNUC__) && (defined(sparc) || defined(__sparc)) \
&& !defined(AO_USE_PTHREAD_DEFS)
# include "atomic_ops/sysdeps/sunc/sparc.h"
# define AO_CAN_EMUL_CAS
#endif
#if defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
|| (defined(__WATCOMC__) && defined(__NT__))
# if defined(_AMD64_) || defined(_M_X64)
# include "atomic_ops/sysdeps/msftc/x86_64.h"
# elif defined(_M_IX86) || defined(x86)
# include "atomic_ops/sysdeps/msftc/x86.h"
# elif defined(_M_ARM) || defined(ARM) || defined(_ARM_)
# include "atomic_ops/sysdeps/msftc/arm.h"
# endif
#endif
#if defined(AO_REQUIRE_CAS) && !defined(AO_HAVE_compare_and_swap) \
&& !defined(AO_HAVE_compare_and_swap_full) \
&& !defined(AO_HAVE_compare_and_swap_acquire)
# if defined(AO_CAN_EMUL_CAS)
# include "atomic_ops/sysdeps/emul_cas.h"
# else
# error Cannot implement AO_compare_and_swap_full on this architecture.
# endif
#endif /* AO_REQUIRE_CAS && !AO_HAVE_compare_and_swap ... */
/* The most common way to clear a test-and-set location */
/* at the end of a critical section. */
#if AO_AO_TS_T && !defined(AO_CLEAR)
# define AO_CLEAR(addr) AO_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
#endif
#if AO_CHAR_TS_T && !defined(AO_CLEAR)
# define AO_CLEAR(addr) AO_char_store_release((AO_TS_t *)(addr), AO_TS_CLEAR)
#endif
/*
* The generalization section.
* Theoretically this should repeatedly include atomic_ops_generalize.h.
* In fact, we observe that this converges after a small fixed number
* of iterations, usually one.
*/
#include "./generalize.h"
#ifdef AO_GENERALIZE_TWICE
# include "./generalize.h"
#endif
/* For compatibility with version 0.4 and earlier */
#define AO_TS_T AO_TS_t
#define AO_T AO_t
#define AO_TS_VAL AO_TS_VAL_t
#endif /* ATOMIC_OPS_H */
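
Spelling out the initialization-flag idiom documented in the header comment above as a compilable sketch; the struct and function names are illustrative, not part of the header:

#include "atomic_ops.h"

struct config { int x; int y; };
static struct config data;
static volatile AO_t data_is_initialized = 0;

void publisher(void)
{
    data.x = 1;
    data.y = 2;
    /* earlier writes may not be delayed past this store */
    AO_store_release_write(&data_is_initialized, 1);
}

void consumer(void)
{
    /* later reads may not move ahead of this load */
    if (AO_load_acquire_read(&data_is_initialized)) {
        /* safe: data.x and data.y are fully initialized here */
    }
}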

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View file

@ -1,297 +0,0 @@
/*
* Copyright (c) 2003 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "./aligned_atomic_load_store.h"
#include "./all_acquire_release_volatile.h"
#include "./test_and_set_t_is_char.h"
#ifdef _ILP32
/* 32-bit HP/UX code. */
/* This requires pointer "swizzling". Pointers need to be expanded */
/* to 64 bits using the addp4 instruction before use. This makes it */
/* hard to share code, but we try anyway. */
# define AO_LEN "4"
/* We assume that addr always appears in argument position 1 in asm */
/* code. If it is clobbered due to swizzling, we also need it in */
/* second position. Any later arguments are referenced symbolically, */
/* so that we don't have to worry about their position. This requires*/
/* gcc 3.1, but you shouldn't be using anything older than that on */
/* IA64 anyway. */
/* The AO_MASK macro is a workaround for the fact that HP/UX gcc */
/* appears to otherwise store 64-bit pointers in ar.ccv, i.e. it */
/* doesn't appear to clear high bits in a pointer value we pass into */
/* assembly code, even if it is supposedly of type AO_t. */
# define AO_IN_ADDR "1"(addr)
# define AO_OUT_ADDR , "=r"(addr)
# define AO_SWIZZLE "addp4 %1=0,%1;;\n"
# define AO_MASK(ptr) __asm__("zxt4 %1=%1": "=r"(ptr) : "0"(ptr));
#else
# define AO_LEN "8"
# define AO_IN_ADDR "r"(addr)
# define AO_OUT_ADDR
# define AO_SWIZZLE
# define AO_MASK(ptr)
#endif
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mf" : : : "memory");
}
#define AO_HAVE_nop_full
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".acq %0=[%1],1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_add1_acquire
AO_INLINE AO_t
AO_fetch_and_add1_release (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".rel %0=[%1],1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
AO_fetch_and_sub1_acquire (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".acq %0=[%1],-1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
AO_fetch_and_sub1_release (volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (AO_SWIZZLE
"fetchadd" AO_LEN ".rel %0=[%1],-1":
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_fetch_and_sub1_release
#ifndef _ILP32
AO_INLINE unsigned int
AO_int_fetch_and_add1_acquire (volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1":
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_int_fetch_and_add1_acquire
AO_INLINE unsigned int
AO_int_fetch_and_add1_release (volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],1":
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_int_fetch_and_add1_release
AO_INLINE unsigned int
AO_int_fetch_and_sub1_acquire (volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],-1":
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_int_fetch_and_sub1_acquire
AO_INLINE unsigned int
AO_int_fetch_and_sub1_release (volatile unsigned int *addr)
{
unsigned int result;
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1":
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
#define AO_HAVE_int_fetch_and_sub1_release
#endif /* !_ILP32 */
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
AO_t oldval;
AO_MASK(old);
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
".acq %0=[%1],%[new_val],ar.ccv"
: "=r"(oldval) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
: "memory");
return (oldval == old);
}
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr,
AO_t old, AO_t new_val)
{
AO_t oldval;
AO_MASK(old);
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
".rel %0=[%1],%[new_val],ar.ccv"
: "=r"(oldval) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
: "memory");
return (oldval == old);
}
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_char_compare_and_swap_acquire(volatile unsigned char *addr,
unsigned char old, unsigned char new_val)
{
unsigned char oldval;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv"
: "=r"(oldval) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return (oldval == old);
}
#define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
AO_char_compare_and_swap_release(volatile unsigned char *addr,
unsigned char old, unsigned char new_val)
{
unsigned char oldval;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv"
: "=r"(oldval) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return (oldval == old);
}
#define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
AO_short_compare_and_swap_acquire(volatile unsigned short *addr,
unsigned short old, unsigned short new_val)
{
unsigned short oldval;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv"
: "=r"(oldval) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return (oldval == old);
}
#define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
AO_short_compare_and_swap_release(volatile unsigned short *addr,
unsigned short old, unsigned short new_val)
{
unsigned short oldval;
__asm__ __volatile__(AO_SWIZZLE
"mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv"
: "=r"(oldval) AO_OUT_ADDR
: AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
: "memory");
return (oldval == old);
}
#define AO_HAVE_short_compare_and_swap_release
#ifndef _ILP32
AO_INLINE int
AO_int_compare_and_swap_acquire(volatile unsigned int *addr,
unsigned int old, unsigned int new_val)
{
unsigned int oldval;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv"
: "=r"(oldval)
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory");
return (oldval == old);
}
#define AO_HAVE_int_compare_and_swap_acquire
AO_INLINE int
AO_int_compare_and_swap_release(volatile unsigned int *addr,
unsigned int old, unsigned int new_val)
{
unsigned int oldval;
__asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv"
: "=r"(oldval)
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory");
return (oldval == old);
}
#define AO_HAVE_int_compare_and_swap_release
#endif /* !_ILP32 */
/* FIXME: Add compare_and_swap_double as soon as there is widely */
/* available hardware that implements it. */
/* FIXME: Add compare_double_and_swap_double for the _ILP32 case. */
#ifdef _ILP32
# include "./ao_t_is_int.h"
#endif
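
Since this port exposes fetchadd only in its +/-1 forms, a general atomic add has to be synthesized from the compare-and-swap primitives above. A sketch of the usual retry loop, assuming acquire semantics are wanted:

/* Add an arbitrary increment using the acquire CAS defined above. */
AO_t atomic_add_acquire(volatile AO_t *addr, AO_t incr)
{
    AO_t old;
    do {
        old = *addr;                 /* racy read; the CAS validates it */
    } while (!AO_compare_and_swap_acquire(addr, old, old + incr));
    return old;
}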

View file

@ -1,100 +0,0 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures that provide processor
* ordered memory operations except that a later read may pass an
* earlier write. Real x86 implementations seem to be in this category,
* except apparently for some IDT WinChips, which we ignore.
*/
#include "read_ordered.h"
AO_INLINE void
AO_nop_write(void)
{
AO_compiler_barrier();
/* sfence according to Intel docs. Pentium 3 and up. */
/* Unnecessary for cached accesses? */
}
#define AO_HAVE_nop_write
#if defined(AO_HAVE_store)
AO_INLINE void
AO_store_write(volatile AO_t *addr, AO_t val)
{
AO_compiler_barrier();
AO_store(addr, val);
}
# define AO_HAVE_store_write
# define AO_store_release(addr, val) AO_store_write(addr, val)
# define AO_HAVE_store_release
#endif /* AO_HAVE_store */
#if defined(AO_HAVE_char_store)
AO_INLINE void
AO_char_store_write(volatile unsigned char *addr, unsigned char val)
{
AO_compiler_barrier();
AO_char_store(addr, val);
}
# define AO_HAVE_char_store_write
# define AO_char_store_release(addr, val) AO_char_store_write(addr, val)
# define AO_HAVE_char_store_release
#endif /* AO_HAVE_char_store */
#if defined(AO_HAVE_short_store)
AO_INLINE void
AO_short_store_write(volatile unsigned short *addr, unsigned short val)
{
AO_compiler_barrier();
AO_short_store(addr, val);
}
# define AO_HAVE_short_store_write
# define AO_short_store_release(addr, val) AO_short_store_write(addr, val)
# define AO_HAVE_short_store_release
#endif /* AO_HAVE_short_store */
#if defined(AO_HAVE_int_store)
AO_INLINE void
AO_int_store_write(volatile unsigned int *addr, unsigned int val)
{
AO_compiler_barrier();
AO_int_store(addr, val);
}
# define AO_HAVE_int_store_write
# define AO_int_store_release(addr, val) AO_int_store_write(addr, val)
# define AO_HAVE_int_store_release
#endif /* AO_HAVE_int_store */
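
On these write-ordered targets a release-style publication costs only a compiler barrier, which is exactly what the AO_store_write wrappers above buy. A sketch with illustrative variable names:

static volatile AO_t ready = 0;
static AO_t payload;

void announce(AO_t v)
{
    payload = v;
    /* compiler barrier + plain store: earlier writes precede this one */
    AO_store_write(&ready, 1);
}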

View file

@ -1,346 +0,0 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* Memory model documented at http://www-106.ibm.com/developerworks/ */
/* eserver/articles/archguide.html and (clearer) */
/* http://www-106.ibm.com/developerworks/eserver/articles/powerpc.html. */
/* There appears to be no implicit ordering between any kind of */
/* independent memory references. */
/* Architecture enforces some ordering based on control dependence. */
/* I don't know if that could help. */
/* Data-dependent loads are always ordered. */
/* Based on the above references, eieio is intended for use on */
/* uncached memory, which we don't support. It does not order loads */
/* from cached memory. */
/* Thanks to Maged Michael, Doug Lea, and Roger Hoover for helping to */
/* track some of this down and correcting my misunderstandings. -HB */
/* Earl Chew subsequently contributed further fixes & additions. */
#include "./aligned_atomic_load_store.h"
#include "./test_and_set_t_is_ao_t.h"
/* There seems to be no byte equivalent of lwarx, so this */
/* may really be what we want, at least in the 32-bit case. */
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("sync" : : : "memory");
}
#define AO_HAVE_nop_full
/* lwsync apparently works for everything but a StoreLoad barrier. */
AO_INLINE void
AO_lwsync(void)
{
#ifdef __NO_LWSYNC__
__asm__ __volatile__("sync" : : : "memory");
#else
__asm__ __volatile__("lwsync" : : : "memory");
#endif
}
#define AO_nop_write() AO_lwsync()
#define AO_HAVE_nop_write
#define AO_nop_read() AO_lwsync()
#define AO_HAVE_nop_read
/* We explicitly specify load_acquire, since it is important, and can */
/* be implemented relatively cheaply. It could be implemented */
/* with an ordinary load followed by a lwsync. But the general wisdom */
/* seems to be that a data dependent branch followed by an isync is */
/* cheaper. And the documentation is fairly explicit that this also */
/* has acquire semantics. */
/* ppc64 uses ld not lwz */
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
__asm__ __volatile__ (
"ld%U1%X1 %0,%1\n"
"cmpw %0,%0\n"
"bne- 1f\n"
"1: isync\n"
: "=r" (result)
: "m"(*addr) : "memory", "cr0");
return result;
}
#else
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
/* FIXME: We should get gcc to allocate one of the condition */
/* registers. I always got "impossible constraint" when I */
/* tried the "y" constraint. */
__asm__ __volatile__ (
"lwz%U1%X1 %0,%1\n"
"cmpw %0,%0\n"
"bne- 1f\n"
"1: isync\n"
: "=r" (result)
: "m"(*addr) : "memory", "cc");
return result;
}
#endif
#define AO_HAVE_load_acquire
/* We explicitly specify store_release, since it relies */
/* on the fact that lwsync is also a LoadStore barrier. */
AO_INLINE void
AO_store_release(volatile AO_t *addr, AO_t value)
{
AO_lwsync();
*addr = value;
}
#define AO_HAVE_store_release
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* Completely untested. And we should be using smaller objects anyway. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
unsigned long oldval;
unsigned long temp = 1; /* locked value */
__asm__ __volatile__(
"1:ldarx %0,0,%1\n" /* load and reserve */
"cmpdi %0, 0\n" /* if load is */
"bne 2f\n" /* non-zero, return already set */
"stdcx. %2,0,%1\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"2:\n" /* oldval is zero if we set */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
return (AO_TS_VAL_t)oldval;
}
#else
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
int oldval;
int temp = 1; /* locked value */
__asm__ __volatile__(
"1:lwarx %0,0,%1\n" /* load and reserve */
"cmpwi %0, 0\n" /* if load is */
"bne 2f\n" /* non-zero, return already set */
"stwcx. %2,0,%1\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"2:\n" /* oldval is zero if we set */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
return (AO_TS_VAL_t)oldval;
}
#endif
#define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_test_and_set_acquire(volatile AO_TS_t *addr) {
AO_TS_VAL_t result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_test_and_set_release(volatile AO_TS_t *addr) {
AO_lwsync();
return AO_test_and_set(addr);
}
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t result;
AO_lwsync();
result = AO_test_and_set(addr);
AO_lwsync();
return result;
}
#define AO_HAVE_test_and_set_full
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* FIXME: Completely untested. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t oldval;
int result = 0;
__asm__ __volatile__(
"1:ldarx %0,0,%2\n" /* load and reserve */
"cmpd %0, %4\n" /* if load is not equal to */
"bne 2f\n" /* old, fail */
"stdcx. %3,0,%2\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"li %1,1\n" /* result = 1; */
"2:\n"
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
return result;
}
#else
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t oldval;
int result = 0;
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"cmpw %0, %4\n" /* if load is not equal to */
"bne 2f\n" /* old, fail */
"stwcx. %3,0,%2\n" /* else store conditional */
"bne- 1b\n" /* retry if lost reservation */
"li %1,1\n" /* result = 1; */
"2:\n"
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
return result;
}
#endif
#define AO_HAVE_compare_and_swap
AO_INLINE int
AO_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val) {
int result = AO_compare_and_swap(addr, old, new_val);
AO_lwsync();
return result;
}
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t result;
AO_lwsync();
result = AO_compare_and_swap(addr, old, new_val);
AO_lwsync();
return result;
}
#define AO_HAVE_compare_and_swap_full
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* FIXME: Completely untested. */
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
AO_t newval;
__asm__ __volatile__(
"1:ldarx %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
"stdcx. %1,0,%2\n" /* store conditional */
"bne- 1b\n" /* retry if lost reservation */
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
return oldval;
}
#define AO_HAVE_fetch_and_add
#else
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
AO_t newval;
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
"stwcx. %1,0,%2\n" /* store conditional */
"bne- 1b\n" /* retry if lost reservation */
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
return oldval;
}
#define AO_HAVE_fetch_and_add
#endif
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
AO_t result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_and_add_acquire
AO_INLINE AO_t
AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr) {
AO_lwsync();
return AO_fetch_and_add(addr, incr);
}
#define AO_HAVE_fetch_and_add_release
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr) {
AO_t result;
AO_lwsync();
result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
#define AO_HAVE_fetch_and_add_full
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
#else
# include "./ao_t_is_int.h"
#endif
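
The test-and-set and lwsync pieces above compose into the canonical libatomic_ops spinlock. A sketch; the lock variable and the critical-section callback are placeholders:

#include "atomic_ops.h"

static volatile AO_TS_t lock = AO_TS_INITIALIZER;

void with_lock(void (*body)(void))
{
    while (AO_test_and_set_acquire(&lock) == AO_TS_SET)
        ;                          /* spin; acquire fence on success */
    body();
    AO_CLEAR(&lock);               /* release store resets the flag */
}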

View file

@ -1,100 +0,0 @@
/*
* Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures that provide processor
* ordered memory operations except that a later read may pass an
* earlier write. Real x86 implementations seem to be in this category,
* except apparently for some IDT WinChips, which we ignore.
*/
AO_INLINE void
AO_nop_read(void)
{
AO_compiler_barrier();
}
#define AO_HAVE_nop_read
#ifdef AO_HAVE_load
AO_INLINE AO_t
AO_load_read(const volatile AO_t *addr)
{
AO_t result = AO_load(addr);
AO_compiler_barrier();
return result;
}
#define AO_HAVE_load_read
#define AO_load_acquire(addr) AO_load_read(addr)
#define AO_HAVE_load_acquire
#endif /* AO_HAVE_load */
#ifdef AO_HAVE_char_load
AO_INLINE AO_t
AO_char_load_read(const volatile unsigned char *addr)
{
AO_t result = AO_char_load(addr);
AO_compiler_barrier();
return result;
}
#define AO_HAVE_char_load_read
#define AO_char_load_acquire(addr) AO_char_load_read(addr)
#define AO_HAVE_char_load_acquire
#endif /* AO_HAVE_char_load */
#ifdef AO_HAVE_short_load
AO_INLINE AO_t
AO_short_load_read(const volatile unsigned short *addr)
{
AO_t result = AO_short_load(addr);
AO_compiler_barrier();
return result;
}
#define AO_HAVE_short_load_read
#define AO_short_load_acquire(addr) AO_short_load_read(addr)
#define AO_HAVE_short_load_acquire
#endif /* AO_HAVE_short_load */
#ifdef AO_HAVE_int_load
AO_INLINE AO_t
AO_int_load_read(const volatile unsigned int *addr)
{
AO_t result = AO_int_load(addr);
AO_compiler_barrier();
return result;
}
#define AO_HAVE_int_load_read
#define AO_int_load_acquire(addr) AO_int_load_read(addr)
#define AO_HAVE_int_load_acquire
#endif /* AO_HAVE_int_load */
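
Because each AO_load_read above pins later reads behind it, an optimistic version-checked reader needs nothing stronger on these architectures. A sketch, assuming a writer that holds the version odd while updating:

static volatile AO_t version;   /* odd while a writer is mid-update */
static volatile AO_t value;

AO_t read_consistent(void)
{
    AO_t v1, snap, v2;
    do {
        v1   = AO_load_read(&version);  /* later reads stay after this */
        snap = AO_load_read(&value);
        v2   = AO_load_read(&version);
    } while ((v1 & 1) || v1 != v2);     /* retry on an in-progress update */
    return snap;
}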

View file

@ -1,72 +0,0 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* FIXME. Very incomplete. No support for sparc64. */
/* Non-ancient SPARCs provide compare-and-swap (casa). */
/* We should make that available. */
#include "./aligned_atomic_load_store.h"
/* Real SPARC code uses TSO: */
#include "./ordered_except_wr.h"
/* Test_and_set location is just a byte. */
#include "./test_and_set_t_is_char.h"
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr) {
AO_TS_VAL_t oldval;
__asm__ __volatile__("ldstub %1,%0"
: "=r"(oldval), "=m"(*addr)
: "m"(*addr) : "memory");
return oldval;
}
#define AO_HAVE_test_and_set_full
#ifndef AO_NO_SPARC_V9
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val) {
char ret;
__asm__ __volatile__ ("membar #StoreLoad | #LoadLoad\n\t"
# if defined(__arch64__)
"casx [%2],%0,%1\n\t"
# else
"cas [%2],%0,%1\n\t" /* 32-bit version */
# endif
"membar #StoreLoad | #StoreStore\n\t"
"cmp %0,%1\n\t"
"be,a 0f\n\t"
"mov 1,%0\n\t"/* one insn after branch always executed */
"clr %0\n\t"
"0:\n\t"
: "=r" (ret), "+r" (new_val)
: "r" (addr), "0" (old)
: "memory", "cc");
return (int)ret;
}
#define AO_HAVE_compare_and_swap_full
#endif /* AO_NO_SPARC_V9 */
/* FIXME: This needs to be extended for SPARC v8 and v9. */
/* SPARC V8 also has swap. V9 has CAS. */
/* There are barriers like membar #LoadStore. */
/* CASA (32-bit) and CASXA(64-bit) instructions were */
/* added in V9. */
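
This port defines AO_compare_and_swap_full only when V9 instructions may be used, so portable callers guard on the AO_HAVE_ feature macro, per the convention documented in atomic_ops.h. A sketch; slot, expected, and desired are illustrative names:

int try_install(volatile AO_t *slot, AO_t expected, AO_t desired)
{
#ifdef AO_HAVE_compare_and_swap_full
    return AO_compare_and_swap_full(slot, expected, desired);
#else
    return 0;   /* e.g. fall back to a lock built on AO_test_and_set_full */
#endif
}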

View file

@ -1,25 +0,0 @@
/* NEC LE-IT: For a 64-bit OS we extend the double type to hold two int64's
*
* x86-64: __m128 serves as a placeholder which also requires the compiler
* to align it on a 16 byte boundary (as required by cmpxchg16b).
* Similar things could be done for PowerPC 64bit using a VMX data type... */
#if (defined(__x86_64__) && defined(__GNUC__)) || defined(_WIN64)
# include <xmmintrin.h>
typedef __m128 double_ptr_storage;
#elif defined(_WIN32) && !defined(__GNUC__)
typedef unsigned __int64 double_ptr_storage;
#else
typedef unsigned long long double_ptr_storage;
#endif
# define AO_HAVE_DOUBLE_PTR_STORAGE
typedef union {
double_ptr_storage AO_whole;
struct {AO_t AO_v1; AO_t AO_v2;} AO_parts;
} AO_double_t;
#define AO_HAVE_double_t
#define AO_val1 AO_parts.AO_v1
#define AO_val2 AO_parts.AO_v2
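
A short sketch of how the union is addressed through the AO_val1/AO_val2 accessors; the pointer-plus-counter interpretation is illustrative:

AO_double_t head;

head.AO_val1 = (AO_t)0;  /* e.g. a pointer slot */
head.AO_val2 = 0;        /* e.g. a generation counter for ABA avoidance */
/* head.AO_whole carries both halves for a wide compare-and-swap */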

View file

@ -1,36 +0,0 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures on which test_and_set
* operates on pointer-sized quantities, the "clear" value contains
* all zeroes, and the "set" value contains only one lowest bit set.
* This can be used if test_and_set is synthesized from compare_and_swap.
*/
typedef enum {AO_TS_clear = 0, AO_TS_set = 1} AO_TS_val;
#define AO_TS_VAL_t AO_TS_val
#define AO_TS_CLEAR AO_TS_clear
#define AO_TS_SET AO_TS_set
#define AO_TS_t AO_t
#define AO_AO_TS_T 1

View file

@ -1,38 +0,0 @@
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* These are common definitions for architectures on which test_and_set
* operates on byte sized quantities, the "clear" value contains
* all zeroes, and the "set" value contains all ones.
*/
#define AO_TS_t unsigned char
typedef enum {AO_BYTE_TS_clear = 0, AO_BYTE_TS_set = 0xff} AO_BYTE_TS_val;
#define AO_TS_VAL_t AO_BYTE_TS_val
#define AO_TS_CLEAR AO_BYTE_TS_clear
#define AO_TS_SET AO_BYTE_TS_set
#define AO_CHAR_TS_T 1

View file

@ -1,173 +0,0 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
* Some of the machine specific code was borrowed from our GC distribution.
*/
/* The following really assume we have a 486 or better. Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command */
/* line options. */
/* We should perhaps test dynamically. */
#include "./aligned_atomic_load_store.h"
/* Real X86 implementations, except for some old WinChips, appear */
/* to enforce ordering between memory operations, EXCEPT that a later */
/* read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
/* We ignore both the WinChips, and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "./ordered_except_wr.h"
#include "./test_and_set_t_is_char.h"
#include "./standard_ao_double_t.h"
#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
__asm__ __volatile__("mfence" : : : "memory");
}
#define AO_HAVE_nop_full
#else
/* We could use the cpuid instruction. But that seems to be slower */
/* than the default implementation based on test_and_set_full. Thus */
/* we omit that bit of misinformation here. */
#endif
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
AO_t result;
__asm__ __volatile__ ("lock; xaddl %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
unsigned char result;
__asm__ __volatile__ ("lock; xaddb %0, %1" :
"=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
unsigned short result;
__asm__ __volatile__ ("lock; xaddw %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_short_fetch_and_add_full
/* Really only works for 486 and later */
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
__asm__ __volatile__ ("lock; orl %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
unsigned char oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
__asm__ __volatile__("xchgb %0, %1"
: "=q"(oldval), "=m"(*addr)
: "0"((unsigned char)0xff), "m"(*addr) : "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
char result;
__asm__ __volatile__("lock; cmpxchgl %3, %0; setz %1"
: "=m" (*addr), "=a" (result)
: "m" (*addr), "r" (new_val), "a" (old) : "memory");
return (int)result;
# endif
}
#define AO_HAVE_compare_and_swap_full
/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
char result;
#if __PIC__
/* If PIC is turned on, we can't use %ebx as it is reserved for the
GOT pointer. We can save and restore %ebx because GCC won't be
using it for anything else (such as any of the m operands) */
__asm__ __volatile__("pushl %%ebx;" /* save ebx used for PIC GOT ptr */
"movl %6,%%ebx;" /* move new_val2 to %ebx */
"lock; cmpxchg8b %0; setz %1;"
"pop %%ebx;" /* restore %ebx */
: "=m"(*addr), "=a"(result)
: "m"(*addr), "d" (old_val2), "a" (old_val1),
"c" (new_val2), "m" (new_val1) : "memory");
#else
/* We can't just do the same thing in non-PIC mode, because GCC
* might be using %ebx as the memory operand. We could have ifdef'd
* in a clobber, but there's no point doing the push/pop if we don't
* have to. */
__asm__ __volatile__("lock; cmpxchg8b %0; setz %1;"
: "=m"(*addr), "=a"(result)
: "m"(*addr), "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1) : "memory");
#endif
return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
#include "./ao_t_is_int.h"
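
The cmpxchg8b primitive above is what makes ABA-safe updates of a {value, generation} pair possible on IA-32. A hedged retry-loop sketch using the AO_double_t accessors:

/* Replace the value and bump its generation counter, atomically. */
void bump_versioned(volatile AO_double_t *loc, AO_t new_val)
{
    AO_t v1, v2;
    do {
        v1 = loc->AO_val1;           /* current value */
        v2 = loc->AO_val2;           /* current generation */
    } while (!AO_compare_double_and_swap_double_full(loc, v1, v2,
                                                     new_val, v2 + 1));
}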

View file

@ -1,181 +0,0 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
* Some of the machine specific code was borrowed from our GC distribution.
*/
#include "./aligned_atomic_load_store.h"
/* Real X86 implementations appear */
/* to enforce ordering between memory operations, EXCEPT that a later */
/* read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
/* We ignore the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "./ordered_except_wr.h"
#include "./test_and_set_t_is_char.h"
#include "./standard_ao_double_t.h"
AO_INLINE void
AO_nop_full(void)
{
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm__ __volatile__("mfence" : : : "memory");
}
#define AO_HAVE_nop_full
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
AO_t result;
__asm__ __volatile__ ("lock; xaddq %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
unsigned char result;
__asm__ __volatile__ ("lock; xaddb %0, %1" :
"=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
unsigned short result;
__asm__ __volatile__ ("lock; xaddw %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
unsigned int result;
__asm__ __volatile__ ("lock; xaddl %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
return result;
}
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
__asm__ __volatile__ ("lock; orq %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
unsigned char oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
__asm__ __volatile__("xchgb %0, %1"
: "=q"(oldval), "=m"(*addr)
: "0"((unsigned char)0xff), "m"(*addr) : "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
char result;
__asm__ __volatile__("lock; cmpxchgq %3, %0; setz %1"
: "=m" (*addr), "=a" (result)
: "m" (*addr), "r" (new_val), "a" (old) : "memory");
return (int) result;
# endif
}
#define AO_HAVE_compare_and_swap_full
#ifdef AO_CMPXCHG16B_AVAILABLE
/* NEC LE-IT: older AMD Opterons are missing this instruction.
* On these machines SIGILL will be thrown.
* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
* (lock based) version available */
/* HB: Changed this to not define either by default. There are
* enough machines and tool chains around on which cmpxchg16b
* doesn't work. And the emulation is unsafe by our usual rules.
* However both are clearly useful in certain cases.
*/
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
char result;
__asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
: "=m"(*addr), "=a"(result)
: "m"(*addr), "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1) : "memory");
return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
#else
/* this one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c. We probably do not want to do this here, since it is */
/* not atomic with respect to other kinds of updates of *addr. On the */
/* other hand, this may be a useful facility on occasion. */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2);
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
return AO_compare_double_and_swap_double_emulation(addr,
old_val1, old_val2,
new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */
#endif /* AO_CMPXCHG16B_AVAILABLE */
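
With lock xadd available, an event counter is a single instruction on x86_64. A trivial sketch with an illustrative counter name:

static volatile AO_t ops_completed;

void count_op(void)
{
    (void)AO_fetch_and_add_full(&ops_completed, 1);  /* lock; xaddq */
}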

View file

@ -11,9 +11,10 @@ unset POSIX_SHELL # clear it so if we invoke other scripts, they run as ksh as well
set -e
WT_REPO=http://github.com/wiredtiger/wiredtiger.git
WT_BRANCH=
WT_REF="tags/1.6.3"
WT_DIR=wiredtiger-`basename $WT_REF`
WT_BRANCH=develop
WT_DIR=wiredtiger-`basename $WT_BRANCH`
#WT_REF="tags/1.6.6"
#WT_DIR=wiredtiger-`basename $WT_REF`
SNAPPY_VSN="1.0.4"
SNAPPY_DIR=snappy-$SNAPPY_VSN
@ -25,8 +26,7 @@ export BASEDIR="$PWD"
which gmake 1>/dev/null 2>/dev/null && MAKE=gmake
MAKE=${MAKE:-make}
export CFLAGS="$CFLAGS -I $BASEDIR/system/include"
export CXXFLAGS="$CXXFLAGS -I $BASEDIR/system/include"
export CPPFLAGS="$CPPFLAGS -I $BASEDIR/system/include -O3 -mtune=native -march=native"
export LDFLAGS="$LDFLAGS -L$BASEDIR/system/lib"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$BASEDIR/system/lib"
@ -57,7 +57,7 @@ get_wt ()
wt_configure ()
{
(cd $BASEDIR/$WT_DIR/build_posix
../configure --with-pic \
CFLAGS+=-g $BASEDIR/$WT_DIR/configure --with-pic \
--enable-snappy \
--prefix=${BASEDIR}/system || exit 1)
}
@ -75,8 +75,8 @@ get_snappy ()
get_deps ()
{
get_wt;
get_snappy;
get_wt;
}
update_deps ()
@ -109,7 +109,7 @@ build_snappy ()
case "$1" in
clean)
[ -e $BASEDIR/$WT_DIR/build_posix/Makefile ] && \
(cd $BASEDIR/$WT_DIR/build_posix && $MAKE distclean)
(cd $BASEDIR/$WT_DIR/build_posix && $MAKE clean)
rm -rf system $SNAPPY_DIR
rm -f ${BASEDIR}/../priv/wt
rm -f ${BASEDIR}/../priv/libwiredtiger-*.so
@ -130,22 +130,23 @@ case "$1" in
;;
*)
[ -d $WT_DIR ] || get_wt;
[ -d $SNAPPY_DIR ] || get_snappy;
shopt -s extglob
SUFFIXES='@(so|dylib)'
# Build Snappy
[ -d $SNAPPY_DIR ] || get_snappy;
[ -d $BASEDIR/$SNAPPY_DIR ] || (echo "Missing Snappy source directory" && exit 1)
test -f $BASEDIR/system/lib/libsnappy.so.[0-9].[0-9].[0-9] || build_snappy;
test -f $BASEDIR/system/lib/libsnappy.so.[0-9].[0-9].[0-9].* || build_snappy;
# Build WiredTiger
[ -d $WT_DIR ] || get_wt;
[ -d $BASEDIR/$WT_DIR ] || (echo "Missing WiredTiger source directory" && exit 1)
test -f $BASEDIR/system/lib/libwiredtiger-[0-9].[0-9].[0-9].so \
-a -f $BASEDIR/system/lib/libwiredtiger_snappy.so || build_wt;
test -f $BASEDIR/system/lib/libwiredtiger-[0-9].[0-9].[0-9].${SUFFIXES} -a \
-f $BASEDIR/system/lib/libwiredtiger_snappy.${SUFFIXES} || build_wt;
[ -d $BASEDIR/../priv ] || mkdir ${BASEDIR}/../priv
cp -p -P $BASEDIR/system/bin/wt ${BASEDIR}/../priv
cp -p -P $BASEDIR/system/lib/libwiredtiger-[0-9].[0-9].[0-9].so ${BASEDIR}/../priv
cp -p -P $BASEDIR/system/lib/libwiredtiger_snappy.so* ${BASEDIR}/../priv
cp -p -P $BASEDIR/system/lib/libsnappy.so* ${BASEDIR}/../priv
cp -p -P $BASEDIR/system/bin/wt ${BASEDIR}/../priv
cp -p -P ${BASEDIR}/system/lib/libwiredtiger-[0-9].[0-9].[0-9].${SUFFIXES} ${BASEDIR}/../priv
cp -p -P ${BASEDIR}/system/lib/libwiredtiger_snappy.${SUFFIXES} ${BASEDIR}/../priv
cp -p -P ${BASEDIR}/system/lib/libsnappy.${SUFFIXES}* ${BASEDIR}/../priv
;;
esac

View file

@ -1,5 +1,5 @@
diff --git a/ext/compressors/snappy/Makefile.am b/ext/compressors/snappy/Makefile.am
index 6d78823..2122cf8 100644
index 6d78823..c423590 100644
--- a/ext/compressors/snappy/Makefile.am
+++ b/ext/compressors/snappy/Makefile.am
@@ -2,5 +2,6 @@ AM_CPPFLAGS = -I$(top_builddir) -I$(top_srcdir)/src/include
@ -7,100 +7,6 @@ index 6d78823..2122cf8 100644
lib_LTLIBRARIES = libwiredtiger_snappy.la
libwiredtiger_snappy_la_SOURCES = snappy_compress.c
-libwiredtiger_snappy_la_LDFLAGS = -avoid-version -module
+libwiredtiger_snappy_la_CFLAGS = -I$(src_builddir)/../../system/include
+libwiredtiger_snappy_la_LDFLAGS = -avoid-version -module -L$(src_builddir)/../../system/lib -Wl,-rpath,lib/wterl-0.9.0/priv:lib/wterl/priv:priv
+libwiredtiger_snappy_la_CFLAGS = -I$(abs_top_builddir)/../../system/include
+libwiredtiger_snappy_la_LDFLAGS = -avoid-version -module -L$(abs_top_builddir)/../../system/lib -Wl,-rpath,lib/wterl-0.9.0/priv:lib/wterl/priv:priv
libwiredtiger_snappy_la_LIBADD = -lsnappy
diff --git a/src/support/cksum.c b/src/support/cksum.c
index 7e9befe..b924db7 100644
--- a/src/support/cksum.c
+++ b/src/support/cksum.c
@@ -27,6 +27,13 @@
#include "wt_internal.h"
+#if defined(__amd64) || defined(__x86_64)
+#define USE_HARDWARE_CRC32 1
+#else
+#undef USE_HARDWARE_CRC32
+#endif
+
+#ifdef USE_HARDWARE_CRC32
static const uint32_t g_crc_slicing[8][256] = {
#ifdef WORDS_BIGENDIAN
/*
@@ -1078,6 +1085,7 @@ static const uint32_t g_crc_slicing[8][256] = {
}
#endif
};
+#endif /* USE_HARDWARE_CRC32 */
/*
* __wt_cksum --
@@ -1106,15 +1114,29 @@ __wt_cksum(const void *chunk, size_t len)
/* Checksum one byte at a time to the first 4B boundary. */
for (p = chunk;
((uintptr_t)p & (sizeof(uint32_t) - 1)) != 0 &&
- len > 0; ++p, --len)
+ len > 0; ++p, --len) {
+#ifdef USE_HARDWARE_CRC32
+ __asm__ __volatile__(
+ ".byte 0xF2, 0x0F, 0x38, 0xF0, 0xF1"
+ : "=S" (crc)
+ : "0" (crc), "c" (*p));
+#else
#ifdef WORDS_BIGENDIAN
crc = g_crc_slicing[0][((crc >> 24) ^ *p) & 0xFF] ^ (crc << 8);
#else
crc = g_crc_slicing[0][(crc ^ *p) & 0xFF] ^ (crc >> 8);
#endif
+#endif
+ }
/* Checksum in 8B chunks. */
for (nqwords = len / sizeof(uint64_t); nqwords; nqwords--) {
+#ifdef USE_HARDWARE_CRC32
+ __asm__ __volatile__ (
+ ".byte 0xf2, 0x48, 0x0f, 0x38, 0xf0, 0xf1;"
+ : "=S"(crc)
+ : "S"(crc), "c"(*p));
+#else
crc ^= *(uint32_t *)p;
p += sizeof(uint32_t);
next = *(uint32_t *)p;
@@ -1139,22 +1161,32 @@ __wt_cksum(const void *chunk, size_t len)
g_crc_slicing[1][(next >> 16) & 0xFF] ^
g_crc_slicing[0][(next >> 24)];
#endif
+#endif
}
/* Checksum trailing bytes one byte at a time. */
+ for (len &= 0x7; len > 0; ++p, len--) {
+#ifdef USE_HARDWARE_CRC32
+ __asm__ __volatile__(
+ ".byte 0xF2, 0x0F, 0x38, 0xF0, 0xF1"
+ : "=S" (crc)
+ : "0" (crc), "c" (*p));
+#else
#ifdef WORDS_BIGENDIAN
- for (len &= 0x7; len > 0; ++p, len--)
crc = g_crc_slicing[0][((crc >> 24) ^ *p) & 0xFF] ^ (crc << 8);
+#else
+ crc = g_crc_slicing[0][(crc ^ *p) & 0xFF] ^ (crc >> 8);
+#endif
+#endif
+ }
+#ifdef WORDS_BIGENDIAN
/* Do final byte swap to produce a result identical to little endian */
crc =
((crc << 24) & 0xFF000000) |
((crc << 8) & 0x00FF0000) |
((crc >> 8) & 0x0000FF00) |
((crc >> 24) & 0x000000FF);
-#else
- for (len &= 0x7; len > 0; ++p, len--)
- crc = g_crc_slicing[0][(crc ^ *p) & 0xFF] ^ (crc >> 8);
#endif
return (~crc);
}

@@ -31,7 +31,6 @@
#include "common.h"
#include "async_nif.h"
#include "queue.h"
#include "atomic.h"
#define MAX_CACHE_SIZE ASYNC_NIF_MAX_WORKERS
@@ -60,7 +59,6 @@ typedef struct wterl_conn {
STAILQ_HEAD(ctxs, wterl_ctx) cache;
ErlNifMutex *cache_mutex;
uint32_t cache_size;
struct wterl_ctx *mru_ctx[ASYNC_NIF_MAX_WORKERS];
} WterlConnHandle;
typedef struct {
@@ -206,7 +204,7 @@ __ctx_cache_evict(WterlConnHandle *conn_handle)
STAILQ_REMOVE(&conn_handle->cache, c, wterl_ctx, entries);
if (c->session)
c->session->close(c->session, NULL);
enif_free(c);
free(c);
num_evicted++;
}
}
@@ -288,6 +286,7 @@ __retain_ctx(WterlConnHandle *conn_handle, uint32_t worker_id,
struct wterl_ctx **ctx,
int count, const char *session_config, ...)
{
UNUSED(worker_id);
int i = 0;
uint32_t hash = 0;
uint32_t crc = 0;
@@ -324,64 +323,50 @@ __retain_ctx(WterlConnHandle *conn_handle, uint32_t worker_id,
DPRINTF("sig %llu [%u:%u]", PRIuint64(sig), crc, hash);
va_end(ap);
c = NULL;
do c = conn_handle->mru_ctx[worker_id];
while(c && !ATOMIC_CAS_FULL(&conn_handle->mru_ctx[worker_id], c, 0));
if (c && c->sig == sig) {
// mru hit:
DPRINTF("[%.4u] mru hit: %llu found", worker_id, PRIuint64(sig));
*ctx = c;
// check the cache
c = __ctx_cache_find(conn_handle, sig);
if (c == NULL) {
// cache miss:
DPRINTF("[%.4u] cache miss: %llu [cache size: %d]", worker_id, PRIuint64(sig), conn_handle->cache_size);
WT_CONNECTION *conn = conn_handle->conn;
WT_SESSION *session = NULL;
int rc = conn->open_session(conn, NULL, session_config, &session);
if (rc != 0) return rc;
size_t s = sizeof(struct wterl_ctx) + (count * sizeof(struct cursor_info)) + sig_len;
c = malloc(s); // TODO: enif_alloc_resource()
if (c == NULL) {
session->close(session, NULL);
return ENOMEM;
}
memset(c, 0, s);
c->sig = sig;
c->session = session;
c->sig_len = sig_len;
char *p = (char *)c + (s - sig_len);
c->session_config = __copy_str_into(&p, session_config);
c->num_cursors = count;
session_config = arg;
va_start(ap, session_config);
for (i = 0; i < count; i++) {
const char *uri = va_arg(ap, const char *);
const char *config = va_arg(ap, const char *);
// TODO: what to do (if anything) when uri or config is NULL?
c->ci[i].uri = __copy_str_into(&p, uri);
c->ci[i].config = __copy_str_into(&p, config);
rc = session->open_cursor(session, uri, NULL, config, &c->ci[i].cursor);
if (rc != 0) {
free(c);
session->close(session, NULL); // this will free the cursors too
va_end(ap);
return rc;
}
}
va_end(ap);
} else {
// mru miss: check the cache
DPRINTF("[%.4u] mru miss or empty", worker_id);
c = __ctx_cache_find(conn_handle, sig);
if (c == NULL) {
// cache miss:
DPRINTF("[%.4u] cache miss: %llu [cache size: %d]", worker_id, PRIuint64(sig), conn_handle->cache_size);
WT_CONNECTION *conn = conn_handle->conn;
WT_SESSION *session = NULL;
int rc = conn->open_session(conn, NULL, session_config, &session);
if (rc != 0) {
return rc;
}
size_t s = sizeof(struct wterl_ctx) + (count * sizeof(struct cursor_info)) + sig_len;
c = enif_alloc(s); // TODO: enif_alloc_resource()
if (c == NULL) {
session->close(session, NULL);
return ENOMEM;
}
memset(c, 0, s);
c->sig = sig;
c->session = session;
c->sig_len = sig_len;
char *p = (char *)c + (s - sig_len);
c->session_config = __copy_str_into(&p, session_config);
c->num_cursors = count;
session_config = arg;
va_start(ap, session_config);
for (i = 0; i < count; i++) {
const char *uri = va_arg(ap, const char *);
const char *config = va_arg(ap, const char *);
// TODO: what to do (if anything) when uri or config is NULL?
c->ci[i].uri = __copy_str_into(&p, uri);
c->ci[i].config = __copy_str_into(&p, config);
rc = session->open_cursor(session, uri, NULL, config, &c->ci[i].cursor);
if (rc != 0) {
enif_free(c);
session->close(session, NULL); // this will free the cursors too
va_end(ap);
return rc;
}
}
va_end(ap);
*ctx = c;
} else {
// cache hit:
DPRINTF("[%.4u] cache hit: %llu [cache size: %d]", worker_id, PRIuint64(sig), conn_handle->cache_size);
*ctx = c;
}
// cache hit:
DPRINTF("[%.4u] cache hit: %llu [cache size: %d]", worker_id, PRIuint64(sig), conn_handle->cache_size);
}
*ctx = c;
return 0;
}
@@ -391,6 +376,7 @@ __retain_ctx(WterlConnHandle *conn_handle, uint32_t worker_id,
static void
__release_ctx(WterlConnHandle *conn_handle, uint32_t worker_id, struct wterl_ctx *ctx)
{
UNUSED(worker_id);
uint32_t i;
WT_CURSOR *cursor;
@@ -398,13 +384,8 @@ __release_ctx(WterlConnHandle *conn_handle, uint32_t worker_id, struct wterl_ctx
cursor = ctx->ci[i].cursor;
cursor->reset(cursor);
}
struct wterl_ctx *c = conn_handle->mru_ctx[worker_id];
if (!ATOMIC_CAS_FULL(&conn_handle->mru_ctx[worker_id], c, ctx)) {
if (c) __ctx_cache_add(conn_handle, c);
} else __ctx_cache_add(conn_handle, ctx);
DPRINTF("[%.4u] reset %d cursors, returned ctx to cache", worker_id, ctx->num_cursors);
__ctx_cache_add(conn_handle, ctx);
DPRINTF("[%.4u] reset %d cursors, returnd ctx to cache", worker_id, ctx->num_cursors);
}
/**
@@ -416,14 +397,6 @@ void
__close_all_sessions(WterlConnHandle *conn_handle)
{
struct wterl_ctx *c, *n;
int worker_id;
// clear out the mru
for (worker_id = 0; worker_id < ASYNC_NIF_MAX_WORKERS; worker_id++) {
do c = conn_handle->mru_ctx[worker_id];
while(c && !ATOMIC_CAS_FULL(&conn_handle->mru_ctx[worker_id], c, 0));
if (c) { c->session->close(c->session, NULL); enif_free(c); }
}
// clear out the cache
c = STAILQ_FIRST(&conn_handle->cache);
@@ -432,7 +405,7 @@ __close_all_sessions(WterlConnHandle *conn_handle)
STAILQ_REMOVE(&conn_handle->cache, c, wterl_ctx, entries);
conn_handle->cache_size -= 1;
c->session->close(c->session, NULL);
enif_free(c);
free(c);
c = n;
}
}
@@ -446,30 +419,9 @@ void
__close_cursors_on(WterlConnHandle *conn_handle, const char *uri)
{
struct wterl_ctx *c, *n;
int worker_id, idx, cnt;
int idx, cnt;
// walk the mru first, look for open cursors on matching uri
for (worker_id = 0; worker_id < ASYNC_NIF_MAX_WORKERS; worker_id++) {
do c = conn_handle->mru_ctx[worker_id];
while(c && !ATOMIC_CAS_FULL(&conn_handle->mru_ctx[worker_id], c, 0));
if (c) {
cnt = c->num_cursors;
for(idx = 0; idx < cnt; idx++) {
if (!strcmp(c->ci[idx].uri, uri)) {
c->session->close(c->session, NULL);
enif_free(c);
break;
} else {
// not a match, be lazy and add it to the cache rather than
// putting it back on the mru
__ctx_cache_add(conn_handle, c);
}
}
}
}
// next we walk the cache, look for open cursors on matching uri
// walk the entries in the cache, look for open cursors on matching uri
c = STAILQ_FIRST(&conn_handle->cache);
while (c != NULL) {
n = STAILQ_NEXT(c, entries);
@@ -479,7 +431,7 @@ __close_cursors_on(WterlConnHandle *conn_handle, const char *uri)
STAILQ_REMOVE(&conn_handle->cache, c, wterl_ctx, entries);
conn_handle->cache_size -= 1;
c->session->close(c->session, NULL);
enif_free(c);
free(c);
break;
}
}
@@ -488,6 +440,7 @@ __close_cursors_on(WterlConnHandle *conn_handle, const char *uri)
return;
}
/**
* Callback to handle error messages.
*
@@ -502,13 +455,15 @@ __close_cursors_on(WterlConnHandle *conn_handle, const char *uri)
* operation or library failure.
*/
int
__wterl_error_handler(WT_EVENT_HANDLER *handler, int error, const char *message)
__wterl_error_handler(WT_EVENT_HANDLER *handler, WT_SESSION *session,
int error, const char *message)
{
struct wterl_event_handlers *eh = (struct wterl_event_handlers *)handler;
ErlNifEnv *msg_env;
ErlNifPid *to_pid;
int rc = 0;
UNUSED(session);
enif_mutex_lock(eh->error_mutex);
msg_env = eh->msg_env_error;
to_pid = &eh->to_pid;
@@ -540,13 +495,14 @@ __wterl_error_handler(WT_EVENT_HANDLER *handler, int error, const char *message)
* operation or library failure.
*/
int
__wterl_message_handler(WT_EVENT_HANDLER *handler, const char *message)
__wterl_message_handler(WT_EVENT_HANDLER *handler, WT_SESSION *session, const char *message)
{
struct wterl_event_handlers *eh = (struct wterl_event_handlers *)handler;
ErlNifEnv *msg_env;
ErlNifPid *to_pid;
int rc = 0;
UNUSED(session);
enif_mutex_lock(eh->message_mutex);
msg_env = eh->msg_env_message;
to_pid = &eh->to_pid;
@@ -577,13 +533,14 @@ __wterl_message_handler(WT_EVENT_HANDLER *handler, const char *message)
* operation or library failure.
*/
int
__wterl_progress_handler(WT_EVENT_HANDLER *handler, const char *operation, uint64_t counter)
__wterl_progress_handler(WT_EVENT_HANDLER *handler, WT_SESSION *session, const char *operation, uint64_t counter)
{
struct wterl_event_handlers *eh = (struct wterl_event_handlers *)handler;
ErlNifEnv *msg_env;
ErlNifPid *to_pid;
int rc = 0;
UNUSED(session);
enif_mutex_lock(eh->progress_mutex);
msg_env = eh->msg_env_progress;
to_pid = &eh->to_pid;
@@ -685,7 +642,7 @@ ASYNC_NIF_DECL(
return;
}
if (session_config.size > 1) {
char *sc = enif_alloc(session_config.size);
char *sc = malloc(session_config.size);
if (!sc) {
enif_release_resource(conn_handle);
ASYNC_NIF_REPLY(__strerror_term(env, ENOMEM));
@@ -696,7 +653,7 @@ ASYNC_NIF_DECL(
} else {
conn_handle->session_config = NULL;
}
conn_handle->cache_mutex = enif_mutex_create(NULL);
conn_handle->cache_mutex = enif_mutex_create("conn_handle");
enif_mutex_lock(conn_handle->cache_mutex);
conn_handle->conn = conn;
ERL_NIF_TERM result = enif_make_resource(env, conn_handle);
@@ -744,8 +701,9 @@ ASYNC_NIF_DECL(
/* Free up the shared sessions and cursors. */
enif_mutex_lock(args->conn_handle->cache_mutex);
__close_all_sessions(args->conn_handle);
if (args->conn_handle->session_config) {
enif_free((char *)args->conn_handle->session_config);
free((char *)args->conn_handle->session_config);
args->conn_handle->session_config = NULL;
}
WT_CONNECTION* conn = args->conn_handle->conn;
@@ -1383,6 +1341,10 @@ ASYNC_NIF_DECL(
ASYNC_NIF_REPLY(enif_make_badarg(env));
return;
}
if (key.size == 0) {
ASYNC_NIF_REPLY(enif_make_badarg(env));
return;
}
struct wterl_ctx *ctx = NULL;
WT_CURSOR *cursor = NULL;
@@ -1442,6 +1404,10 @@ ASYNC_NIF_DECL(
ASYNC_NIF_REPLY(enif_make_badarg(env));
return;
}
if (key.size == 0) {
ASYNC_NIF_REPLY(enif_make_badarg(env));
return;
}
struct wterl_ctx *ctx = NULL;
WT_CURSOR *cursor = NULL;
@@ -1527,6 +1493,10 @@ ASYNC_NIF_DECL(
ASYNC_NIF_REPLY(enif_make_badarg(env));
return;
}
if (key.size == 0 || value.size == 0) {
ASYNC_NIF_REPLY(enif_make_badarg(env));
return;
}
struct wterl_ctx *ctx = NULL;
WT_CURSOR *cursor = NULL;
@@ -1600,7 +1570,7 @@ ASYNC_NIF_DECL(
}
WT_CURSOR* cursor;
rc = session->open_cursor(session, args->uri, NULL, (config.data[0] != 0) ? (char *)config.data : "overwrite,raw", &cursor);
rc = session->open_cursor(session, args->uri, NULL, (config.data[0] != 0) ? (char *)config.data : "raw", &cursor);
if (rc != 0) {
session->close(session, NULL);
ASYNC_NIF_REPLY(__strerror_term(env, rc));
@@ -2300,15 +2270,15 @@ on_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
ATOM_WIREDTIGER_VSN = enif_make_atom(env, "wiredtiger_vsn");
ATOM_MSG_PID = enif_make_atom(env, "message_pid");
struct wterl_priv_data *priv = enif_alloc(sizeof(struct wterl_priv_data));
struct wterl_priv_data *priv = malloc(sizeof(struct wterl_priv_data));
if (!priv)
return ENOMEM;
memset(priv, 0, sizeof(struct wterl_priv_data));
struct wterl_event_handlers *eh = &priv->eh;
eh->error_mutex = enif_mutex_create(NULL);
eh->message_mutex = enif_mutex_create(NULL);
eh->progress_mutex = enif_mutex_create(NULL);
eh->error_mutex = enif_mutex_create("error_mutex");
eh->message_mutex = enif_mutex_create("message_mutex");
eh->progress_mutex = enif_mutex_create("progress_mutex");
/* Process the load_info array of tuples, we expect:
[{wterl_vsn, "a version string"},
@@ -2328,17 +2298,17 @@ on_load(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info)
/* Note: !!! the first element of our priv_data struct *must* be the
pointer to the async_nif's private data which we set here. */
ASYNC_NIF_LOAD(wterl, priv->async_nif_priv);
ASYNC_NIF_LOAD(wterl, env, priv->async_nif_priv);
if (!priv->async_nif_priv) {
memset(priv, 0, sizeof(struct wterl_priv_data));
enif_free(priv);
free(priv);
return ENOMEM;
}
*priv_data = priv;
char msg[1024];
snprintf(msg, 1024, "NIF on_load complete (wterl version: %s, wiredtiger version: %s)", priv->wterl_vsn, priv->wiredtiger_vsn);
__wterl_message_handler((WT_EVENT_HANDLER *)&priv->eh, msg);
__wterl_message_handler((WT_EVENT_HANDLER *)&priv->eh, NULL, msg);
return 0;
}
@@ -2377,7 +2347,7 @@ on_unload(ErlNifEnv *env, void *priv_data)
enif_free_env(eh->msg_env_progress);
memset(priv, 0, sizeof(struct wterl_priv_data));
enif_free(priv);
free(priv);
priv_data = NULL;
}

priv/wterl.schema (new file)

@@ -0,0 +1,6 @@
%%%% This is the WiredTiger section
%% @doc wiredtiger data_root
{mapping, "wiredtiger.data_root", "wterl.data_root", [
{default, "{{platform_data_dir}}/wiredtiger"}
]}.
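Note: priv/wterl.schema is a Basho cuttlefish schema; each {mapping, ...} tuple exposes a flat riak.conf key and rewrites it to an application environment setting ("wterl.data_root" here). As a sketch only, a hypothetical additional mapping (not part of this changeset; bytesize is a standard cuttlefish datatype) might read:

%% Hypothetical mapping, for illustration only (not in this commit).
%% @doc wiredtiger cache_size
{mapping, "wiredtiger.cache_size", "wterl.cache_size", [
  {default, "1GB"},
  {datatype, bytesize}
]}.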

@@ -5,13 +5,15 @@
{cover_enabled, true}.
{eunit_opts, [verbose, {report, {eunit_surefire, [{dir, "."}]}}]}.
%{eunit_opts, [verbose, {report, {eunit_surefire, [{dir, "."}]}}]}.
{erl_opts, [
%native, {hipe, [o3,verbose]}, inline, {inline_size, 1024},
{parse_transform, lager_transform},
debug_info, %{d,'DEBUG',true},
%strict_validation,
%fail_on_warning,
debug_info,
{d,'DEBUG',true},
strict_validation,
fail_on_warning,
%warn_missing_spec,
warn_bif_clash,
warn_deprecated_function,
@@ -22,7 +24,7 @@
warn_shadow_vars,
warn_untyped_record,
warn_unused_function,
%warn_unused_import,
warn_unused_import,
warn_unused_record,
warn_unused_vars
]}.
@@ -36,8 +38,8 @@
{port_specs, [{"priv/wterl.so", ["c_src/*.c"]}]}.
{port_env, [
{"DRV_CFLAGS", "$DRV_CFLAGS -fPIC -Wall -Wextra -Werror -I c_src/system/include"},
{"DRV_LDFLAGS", "$DRV_LDFLAGS -Wl,-rpath,lib/wterl/priv:priv -Lc_src/system/lib -lwiredtiger"}
{"DRV_CFLAGS", "$DRV_CFLAGS -O3 -mtune=native -march=native -fPIC -Wall -Wextra -Werror -I c_src/system/include"},
{"DRV_LDFLAGS", "$DRV_LDFLAGS -Wl,-rpath,lib/wterl/priv:lib/wterl-0.9.0/priv:priv -Lc_src/system/lib -lwiredtiger"}
]}.
{pre_hooks, [{compile, "c_src/build_deps.sh compile"}]}.

@@ -21,27 +21,34 @@
%%
%% -------------------------------------------------------------------
-spec async_nif_enqueue(reference(), function(), [term()]) -> term() | {error, term()}.
async_nif_enqueue(R, F, A) ->
case erlang:apply(F, [R|A]) of
{ok, enqueued} ->
receive
{R, {error, shutdown}=Error} ->
%% Work unit was queued, but not executed.
Error;
{R, {error, _Reason}=Error} ->
%% Work unit returned an error.
Error;
{R, Reply} ->
Reply
end;
{error, eagain} ->
%% Work unit was not queued, try again.
async_nif_enqueue(R, F, A);
%{error, enomem} ->
%{error, shutdown} ->
Other ->
Other
end.
-define(ASYNC_NIF_CALL(Fun, Args), async_nif_enqueue(erlang:make_ref(), Fun, Args)).
-define(ASYNC_NIF_CALL(Fun, Args),
F = fun(F, T) ->
R = erlang:make_ref(),
case erlang:apply(Fun, [R|Args]) of
{ok, {enqueued, PctBusy}} ->
if
PctBusy > 0.25 andalso PctBusy =< 1.0 ->
erlang:bump_reductions(erlang:trunc(2000 * PctBusy));
true ->
ok
end,
receive
{R, {error, shutdown}=Error} ->
%% Work unit was queued, but not executed.
Error;
{R, {error, _Reason}=Error} ->
%% Work unit returned an error.
Error;
{R, Reply} ->
Reply
end;
{error, eagain} ->
case T of
3 -> not_found;
_ -> F(F, T + 1)
end;
Other ->
Other
end
end,
F(F, 1)).
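Note: the rewritten macro does two things beyond the old async_nif_enqueue/3. When the enqueue reply reports the queues more than 25% busy it calls erlang:bump_reductions/1 in proportion to PctBusy, nudging the scheduler to throttle the caller; and on {error, eagain} it retries, giving up with not_found after the third attempt. A minimal sketch of a call site, assuming a put_nif/5 NIF stub like the other wrappers in this module:

%% Sketch: ?ASYNC_NIF_CALL makes a ref, enqueues the work via the NIF
%% stub, applies the reduction penalty when queues are busy, and turns
%% a third consecutive 'eagain' into not_found.
put(Ref, Table, Key, Value) ->
    ?ASYNC_NIF_CALL(fun put_nif/5, [Ref, Table, Key, Value]).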

@@ -22,6 +22,7 @@
-module(riak_kv_wterl_backend).
-behavior(temp_riak_kv_backend).
-compile([{parse_transform, lager_transform}]).
%% KV Backend API
-export([api_version/0,
@@ -42,7 +43,7 @@
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compiel(export_all).
-compile(export_all).
-endif.
-define(API_VERSION, 1).
@@ -119,14 +120,14 @@ start(Partition, Config) ->
"lsm" ->
[{internal_page_max, "128K"},
{leaf_page_max, "16K"},
{lsm_chunk_size, "100MB"},
{lsm_merge_threads, 2},
{prefix_compression, true},
{lsm_bloom_newest, true},
{lsm_bloom_oldest, true} ,
{lsm_bloom_bit_count, 28},
{lsm_bloom_hash_count, 19},
{lsm_bloom_config, [{leaf_page_max, "8MB"}]}
{lsm, [
{bloom_config, [{leaf_page_max, "8MB"}]},
{bloom_bit_count, 28},
{bloom_hash_count, 19},
{bloom_oldest, true},
{chunk_size, "100MB"},
{merge_threads, 2}
]}
] ++ Compressor;
"table" ->
Compressor
@@ -341,22 +342,23 @@ is_empty(#state{connection=Connection, table=Table}) ->
%% @doc Get the status information for this wterl backend
-spec status(state()) -> [{atom(), term()}].
status(#state{connection=Connection, table=Table}) ->
case wterl:cursor_open(Connection, Table) of
{ok, Cursor} ->
TheStats =
case fetch_status(Cursor) of
{ok, Stats} ->
Stats;
{error, {eperm, _}} -> % TODO: review/fix this logic
{ok, []};
_ ->
{ok, []}
end,
wterl:cursor_close(Cursor),
TheStats;
{error, Reason2} ->
{error, Reason2}
end.
[].
%% case wterl:cursor_open(Connection, "statistics:" ++ Table, [{statistics_fast, true}]) of
%% {ok, Cursor} ->
%% TheStats =
%% case fetch_status(Cursor) of
%% {ok, Stats} ->
%% Stats;
%% {error, {eperm, _}} -> % TODO: review/fix this logic
%% {ok, []};
%% _ ->
%% {ok, []}
%% end,
%% wterl:cursor_close(Cursor),
%% TheStats;
%% {error, Reason2} ->
%% {error, Reason2}
%% end.
%% @doc Register an asynchronous callback
-spec callback(reference(), any(), state()) -> {ok, state()}.
@@ -399,32 +401,41 @@ establish_connection(Config, Type) ->
ok = filelib:ensure_dir(filename:join(DataRoot, "x")),
%% WT Connection Options:
%% NOTE: LSM auto-checkpoints, so we don't have to.
LogSetting = app_helper:get_prop_or_env(log, Config, wterl, false),
CheckpointSetting =
case Type =:= "lsm" of
true ->
[];
case LogSetting of
true ->
%% Turn checkpoints on if logging is on, checkpoints enable log archival.
app_helper:get_prop_or_env(checkpoint, Config, wterl, [{wait, 30}]); % in seconds
_ ->
[]
end;
false ->
app_helper:get_prop_or_env(checkpoint, Config, wterl, [{wait, 10}])
app_helper:get_prop_or_env(checkpoint, Config, wterl, [{wait, 30}])
end,
RequestedCacheSize = app_helper:get_prop_or_env(cache_size, Config, wterl),
ConnectionOpts =
orddict:from_list(
[ wterl:config_value(create, Config, true),
wterl:config_value(sync, Config, false),
wterl:config_value(logging, Config, true),
wterl:config_value(checkpoint_sync, Config, false),
wterl:config_value(transaction_sync, Config, "none"),
wterl:config_value(log, Config, [{enabled, LogSetting}]),
wterl:config_value(mmap, Config, false),
wterl:config_value(checkpoint, Config, CheckpointSetting),
wterl:config_value(session_max, Config, max_sessions(Config)),
wterl:config_value(cache_size, Config, size_cache(RequestedCacheSize)),
wterl:config_value(statistics_log, Config, [{wait, 300}]), % sec
wterl:config_value(statistics, Config, [ "fast", "clear"]),
wterl:config_value(statistics_log, Config, [{wait, 600}]), % in seconds
wterl:config_value(verbose, Config, [ "salvage", "verify"
% Note: for some unknown reason, if you add these additional
% verbose flags Erlang SEGV's "size_object: bad tag for 0x80"
% no idea why... yet... you've been warned.
% no idea why... you've been warned.
%"block", "shared_cache", "reconcile", "evict", "lsm",
%"fileops", "read", "write", "readserver", "evictserver",
%"hazard", "mutex", "ckpt"
]) ] ++ CheckpointSetting ++ proplists:get_value(wterl, Config, [])), % sec
]) ] ++ proplists:get_value(wterl, Config, [])), % sec
%% WT Session Options:
SessionOpts = [{isolation, "snapshot"}],
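Note: ConnectionOpts is an ordinary proplist that wterl later flattens into WiredTiger's textual configuration syntax before opening the connection. A sketch of the equivalent direct call, with illustrative values rather than the defaults above:

%% Illustrative only: wterl serializes the proplist (via config_to_bin)
%% into a "key=value,key=(subkey=value)" WiredTiger config string.
{ok, Conn} = wterl:connection_open("data/wiredtiger",
                                   [{create, true},
                                    {cache_size, "2GB"},
                                    {session_max, 1024},
                                    {checkpoint, [{wait, 30}]}],
                                   [{isolation, "snapshot"}]).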
@@ -545,15 +556,15 @@ from_index_key(LKey) ->
%% @private
%% Return all status from wterl statistics cursor
fetch_status(Cursor) ->
{ok, fetch_status(Cursor, wterl:cursor_next_value(Cursor), [])}.
fetch_status(_Cursor, {error, _}, Acc) ->
lists:reverse(Acc);
fetch_status(_Cursor, not_found, Acc) ->
lists:reverse(Acc);
fetch_status(Cursor, {ok, Stat}, Acc) ->
[What,Val|_] = [binary_to_list(B) || B <- binary:split(Stat, [<<0>>], [global])],
fetch_status(Cursor, wterl:cursor_next_value(Cursor), [{What,Val}|Acc]).
%% fetch_status(Cursor) ->
%% {ok, fetch_status(Cursor, wterl:cursor_next_value(Cursor), [])}.
%% fetch_status(_Cursor, {error, _}, Acc) ->
%% lists:reverse(Acc);
%% fetch_status(_Cursor, not_found, Acc) ->
%% lists:reverse(Acc);
%% fetch_status(Cursor, {ok, Stat}, Acc) ->
%% [What,Val|_] = [binary_to_list(B) || B <- binary:split(Stat, [<<0>>], [global])],
%% fetch_status(Cursor, wterl:cursor_next_value(Cursor), [{What,Val}|Acc]).
size_cache(RequestedSize) ->
Size =

@@ -96,8 +96,8 @@ nif_stub_error(Line) ->
-spec init() -> ok | {error, any()}.
init() ->
erlang:load_nif(filename:join([priv_dir(), atom_to_list(?MODULE)]),
[{wterl_vsn, "53307e8"},
{wiredtiger_vsn, "1.6.2-0-g07cb0a5"}]).
[{wterl_vsn, "942e51b"},
{wiredtiger_vsn, "1.6.4-275-g9c44420"}]). %% TODO automate these
-spec connection_open(string(), config_list()) -> {ok, connection()} | {error, term()}.
-spec connection_open(string(), config_list(), config_list()) -> {ok, connection()} | {error, term()}.
@@ -256,6 +256,7 @@ verify_nif(_AsyncRef, _Ref, _Name, _Config) ->
-spec cursor_open(connection(), string(), config_list()) -> {ok, cursor()} | {error, term()}.
cursor_open(Ref, Table) ->
cursor_open(Ref, Table, []).
cursor_open(Ref, Table, Config) ->
?ASYNC_NIF_CALL(fun cursor_open_nif/4, [Ref, Table, config_to_bin(Config)]).
@@ -453,17 +454,26 @@ config_to_bin([], Acc) ->
config_to_bin([{Key, Value} | Rest], Acc) ->
ConfigTypes =
[{block_compressor, {string, quoted}},
{bloom_bit_count, integer},
{bloom_config, config},
{bloom_hash_count, integer},
{bloom_newest, bool},
{bloom_oldest, bool},
{cache_size, string},
{checkpoint, config},
{checkpoint_sync, bool},
{checksum, string},
{chunk_size, string},
{create, bool},
{direct_io, list},
{drop, list},
{enabled, bool},
{error_prefix, string},
{eviction_target, integer},
{eviction_trigger, integer},
{extensions, {list, quoted}},
{statistics_fast, bool},
{file_max, string},
{force, bool},
{from, string},
{hazard_max, integer},
@@ -473,22 +483,21 @@ config_to_bin([{Key, Value} | Rest], Acc) ->
{isolation, string},
{key_type, string},
{leaf_page_max, string},
{logging, bool},
{lsm_bloom_bit_count, integer},
{lsm_bloom_config, config},
{lsm_bloom_hash_count, integer},
{lsm_bloom_newest, bool},
{lsm_bloom_oldest, bool},
{lsm_chunk_size, string},
{prefix_compression, bool},
{lsm_merge_threads, integer},
{log, config},
{lsm, config},
{mmap, bool},
{merge_threads, integer},
{multiprocess, bool},
{name, string},
{overwrite, bool},
{prefix_compression, bool},
{raw, bool},
{session_max, integer},
{statistics, list},
{statistics_log, config},
{sync, bool},
{target, {list, quoted}},
{to, string},
{transaction_sync, string},
{transactional, bool},
{verbose, list},
{wait, integer}],
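Note: each ConfigTypes entry pairs a configuration key with the encoder config_to_bin/2 applies: bool, integer, and string values become key=value, config values recurse into a parenthesized group, and {string, quoted} / {list, quoted} values are emitted inside double quotes. Roughly, as an illustration (exact escaping lives in config_to_bin/2):

%% [{create, true},
%%  {lsm, [{bloom_bit_count, 28}, {merge_threads, 2}]},
%%  {block_compressor, "snappy"}]
%% encodes approximately to:
%% <<"create=true,lsm=(bloom_bit_count=28,merge_threads=2),block_compressor=\"snappy\"">>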
@@ -610,7 +619,7 @@ many_open_tables_test_() ->
DataDir = ?TEST_DATA_DIR,
KeyGen =
fun(X) ->
crypto:sha(<<X>>)
crypto:hash(sha, <<X>>)
end,
ValGen =
fun() ->
@@ -872,7 +881,7 @@ various_cursor_test_() ->
end},
{"update an item using a cursor",
fun() ->
{ok, Cursor} = cursor_open(ConnRef, "table:test"),
{ok, Cursor} = cursor_open(ConnRef, "table:test", [{overwrite, false}, {raw,true}]),
?assertMatch(ok, cursor_update(Cursor, <<"g">>, <<"goji berries">>)),
?assertMatch(not_found, cursor_update(Cursor, <<"k">>, <<"kumquat">>)),
?assertMatch(ok, cursor_close(Cursor)),
@@ -880,7 +889,7 @@
end},
{"remove an item using a cursor",
fun() ->
{ok, Cursor} = cursor_open(ConnRef, "table:test"),
{ok, Cursor} = cursor_open(ConnRef, "table:test", [{overwrite, false}, {raw,true}]),
?assertMatch(ok, cursor_remove(Cursor, <<"g">>)),
?assertMatch(not_found, cursor_remove(Cursor, <<"l">>)),
?assertMatch(ok, cursor_close(Cursor)),

@@ -25,7 +25,13 @@
{mode, max}.
{duration, 10}.
{concurrent, 4}.
{concurrent, 16}.
{report_interval, 1}.
{pb_timeout_general, 1000}. % ms
%{pb_timeout_read, ?}.
%{pb_timeout_write, ?}.
%{pb_timeout_listkeys, ?}.
%{pb_timeout_mapreduce, ?}.
{driver, basho_bench_driver_wterl}.
{key_generator, {int_to_bin_littleendian,{uniform_int, 5000000}}}.
{value_generator, {fixed_bin, 10000}}.
@@ -37,9 +43,9 @@
{wterl, [
{connection, [
{create, true},
{sync, false},
{logging, true},
{transactional, true},
{session_sync, false},
{transaction_sync, "none"},
{log, [{enabled, false}]},
{session_max, 1024},
{cache_size, 4294967296},
{verbose, []},
@@ -54,11 +60,11 @@
]},
{session, [ {isolation, "snapshot"} ]},
{table_uri, "lsm:test"},
{lsm_merge_threads, 2},
{table, [
{internal_page_max, "128K"},
{leaf_page_max, "128K"},
{lsm_chunk_size, "25MB"},
{prefix_compression, false},
{lsm_bloom_newest, true},
{lsm_bloom_oldest, true} ,
{lsm_bloom_bit_count, 128},
@@ -72,9 +78,9 @@
{wterl_, [
{connection, [
{create, true},
{sync, false},
{logging, true},
{transactional, true},
{session_sync, false},
{transaction_sync, "none"},
{log, [{enabled, false}]},
{session_max, 1024},
{cache_size, 4294967296},
{verbose, []},
@@ -91,7 +97,6 @@
{session, [ {isolation, "snapshot"} ]},
{table_uri, "table:test"},
{table, [
{prefix_compression, false},
{block_compressor, "snappy"} % bzip2
]}
]}.