WIP: more caching work, still not done.
This commit is contained in:
parent
a2cd1d562c
commit
f0d5baeb0e
4 changed files with 981 additions and 54 deletions
61	c_src/common.h (new file)

@@ -0,0 +1,61 @@
/*
 * Copyright (c) 2012 Basho Technologies, Inc. All Rights Reserved.
 * Author: Gregory Burd <greg@basho.com> <greg@burd.me>
 *
 * This file is provided to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain
 * a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#ifndef __COMMON_H__
#define __COMMON_H__

#if defined(__cplusplus)
extern "C" {
#endif

#if !(__STDC_VERSION__ >= 199901L || defined(__GNUC__))
# undef DEBUG
# define DEBUG 0
# define DPRINTF (void) /* Vararg macros may be unsupported */
#elif DEBUG
#include <stdio.h>
#include <stdarg.h>
#define DPRINTF(fmt, ...)                                               \
    do {                                                                \
        fprintf(stderr, "%s:%d " fmt "\n", __func__, __LINE__, __VA_ARGS__); \
        fflush(stderr);                                                 \
    } while(0)
#define DPUTS(arg) DPRINTF("%s", arg)
#else
#define DPRINTF(fmt, ...) ((void) 0)
#define DPUTS(arg) ((void) 0)
#endif

#ifndef __UNUSED
#define __UNUSED(v) ((void)(v))
#endif

#ifndef COMPQUIET
#define COMPQUIET(n, v) do {                                            \
    (n) = (v);                                                          \
    (n) = (n);                                                          \
} while (0)
#endif

#if defined(__cplusplus)
}
#endif

#endif // __COMMON_H__
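For illustration (not part of the commit): a minimal usage sketch of the DPRINTF/DPUTS helpers above, which only emit output when DEBUG is enabled and a C99/GCC preprocessor is available. The function name and URI argument here are hypothetical.

#include "common.h"

static int
open_table(const char *uri)
{
    DPRINTF("opening %s", uri);   /* stderr line tagged with __func__ and __LINE__ when DEBUG is on */
    DPUTS("open complete");       /* shorthand for DPRINTF("%s", ...) */
    return 0;
}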
@@ -586,7 +586,7 @@ static kh_inline khint_t __ac_Wang_hash(khint_t key)
  @param  name     Name of the hash table [symbol]
  @param  khval_t  Type of values [type]
 */
#ifdef __x86_64__
#ifdef __x86_64__
#define KHASH_MAP_INIT_PTR(name, khval_t) \
	KHASH_INIT(name, void*, khval_t, 1, kh_ptr64_hash_func, kh_ptr64_hash_equal)
#else
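For context, KHASH_MAP_INIT_PTR above generates a khash map keyed on raw pointers (the 64-bit branch is shown). A hedged sketch of how such a generated map is used with the stock khash API; the table name refcnt and the wrapper function are hypothetical, not part of this commit.

KHASH_MAP_INIT_PTR(refcnt, int)   /* void* keys -> int values */

static void
count_session(khash_t(refcnt) *h, void *session)
{
    int absent;
    khiter_t k = kh_put(refcnt, h, session, &absent);  /* insert or locate the pointer key */
    if (absent)
        kh_value(h, k) = 0;                            /* first time this pointer is seen */
    kh_value(h, k)++;
}

A caller would create and dispose of the table with kh_init(refcnt) and kh_destroy(refcnt, h).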
|
||||
|
|
667	c_src/queue.h (new file)

@@ -0,0 +1,667 @@
/*
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 4. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* @(#)queue.h 8.5 (Berkeley) 8/20/94
|
||||
* $FreeBSD: src/sys/sys/queue.h,v 1.54 2002/08/05 05:18:43 alfred Exp $
|
||||
*/
|
||||
|
||||
#ifndef _DB_QUEUE_H_
|
||||
#define _DB_QUEUE_H_
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This file defines four types of data structures: singly-linked lists,
|
||||
* singly-linked tail queues, lists and tail queues.
|
||||
*
|
||||
* A singly-linked list is headed by a single forward pointer. The elements
|
||||
* are singly linked for minimum space and pointer manipulation overhead at
|
||||
* the expense of O(n) removal for arbitrary elements. New elements can be
|
||||
* added to the list after an existing element or at the head of the list.
|
||||
* Elements being removed from the head of the list should use the explicit
|
||||
* macro for this purpose for optimum efficiency. A singly-linked list may
|
||||
* only be traversed in the forward direction. Singly-linked lists are ideal
|
||||
* for applications with large datasets and few or no removals or for
|
||||
* implementing a LIFO queue.
|
||||
*
|
||||
* A singly-linked tail queue is headed by a pair of pointers, one to the
|
||||
* head of the list and the other to the tail of the list. The elements are
|
||||
* singly linked for minimum space and pointer manipulation overhead at the
|
||||
* expense of O(n) removal for arbitrary elements. New elements can be added
|
||||
* to the list after an existing element, at the head of the list, or at the
|
||||
* end of the list. Elements being removed from the head of the tail queue
|
||||
* should use the explicit macro for this purpose for optimum efficiency.
|
||||
* A singly-linked tail queue may only be traversed in the forward direction.
|
||||
* Singly-linked tail queues are ideal for applications with large datasets
|
||||
* and few or no removals or for implementing a FIFO queue.
|
||||
*
|
||||
* A list is headed by a single forward pointer (or an array of forward
|
||||
* pointers for a hash table header). The elements are doubly linked
|
||||
* so that an arbitrary element can be removed without a need to
|
||||
* traverse the list. New elements can be added to the list before
|
||||
* or after an existing element or at the head of the list. A list
|
||||
* may only be traversed in the forward direction.
|
||||
*
|
||||
* A tail queue is headed by a pair of pointers, one to the head of the
|
||||
* list and the other to the tail of the list. The elements are doubly
|
||||
* linked so that an arbitrary element can be removed without a need to
|
||||
* traverse the list. New elements can be added to the list before or
|
||||
* after an existing element, at the head of the list, or at the end of
|
||||
* the list. A tail queue may be traversed in either direction.
|
||||
*
|
||||
 * For details on the use of these macros, see the queue(3) manual page.
 *
 *
 *                      SLIST   LIST    STAILQ  TAILQ
 * _HEAD                +       +       +       +
 * _HEAD_INITIALIZER    +       +       +       +
 * _ENTRY               +       +       +       +
 * _INIT                +       +       +       +
 * _EMPTY               +       +       +       +
 * _FIRST               +       +       +       +
 * _NEXT                +       +       +       +
 * _PREV                -       -       -       +
 * _LAST                -       -       +       +
 * _FOREACH             +       +       +       +
 * _FOREACH_REVERSE     -       -       -       +
 * _INSERT_HEAD         +       +       +       +
 * _INSERT_BEFORE       -       +       -       +
 * _INSERT_AFTER        +       +       +       +
 * _INSERT_TAIL         -       -       +       +
 * _CONCAT              -       -       +       +
 * _REMOVE_HEAD         +       -       +       -
 * _REMOVE              +       +       +       +
 *
 */
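To make the table above concrete, here is a small self-contained sketch (not part of the commit) that uses the STAILQ macros from this header as a FIFO work queue; the struct and field names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "queue.h"

struct job {
    int id;
    STAILQ_ENTRY(job) entries;            /* embedded forward link */
};
STAILQ_HEAD(job_queue, job);

int main(void)
{
    struct job_queue q = STAILQ_HEAD_INITIALIZER(q);
    int i;

    for (i = 0; i < 3; i++) {             /* enqueue at the tail: FIFO order */
        struct job *j = malloc(sizeof(*j));
        if (j == NULL)
            abort();
        j->id = i;
        STAILQ_INSERT_TAIL(&q, j, entries);
    }
    while (!STAILQ_EMPTY(&q)) {           /* dequeue from the head */
        struct job *j = STAILQ_FIRST(&q);
        STAILQ_REMOVE_HEAD(&q, entries);
        printf("job %d\n", j->id);
        free(j);
    }
    return 0;
}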
||||
|
||||
/*
|
||||
* XXX
|
||||
* We #undef all of the macros because there are incompatible versions of this
|
||||
* file and these macros on various systems. What makes the problem worse is
|
||||
* they are included and/or defined by system include files which we may have
|
||||
* already loaded into Berkeley DB before getting here. For example, FreeBSD's
|
||||
* <rpc/rpc.h> includes its system <sys/queue.h>, and VxWorks UnixLib.h defines
|
||||
* several of the LIST_XXX macros. Visual C.NET 7.0 also defines some of these
|
||||
* same macros in Vc7\PlatformSDK\Include\WinNT.h. Make sure we use ours.
|
||||
*/
|
||||
#undef LIST_EMPTY
|
||||
#undef LIST_ENTRY
|
||||
#undef LIST_FIRST
|
||||
#undef LIST_FOREACH
|
||||
#undef LIST_HEAD
|
||||
#undef LIST_HEAD_INITIALIZER
|
||||
#undef LIST_INIT
|
||||
#undef LIST_INSERT_AFTER
|
||||
#undef LIST_INSERT_BEFORE
|
||||
#undef LIST_INSERT_HEAD
|
||||
#undef LIST_NEXT
|
||||
#undef LIST_REMOVE
|
||||
#undef QMD_TRACE_ELEM
|
||||
#undef QMD_TRACE_HEAD
|
||||
#undef QUEUE_MACRO_DEBUG
|
||||
#undef SLIST_EMPTY
|
||||
#undef SLIST_ENTRY
|
||||
#undef SLIST_FIRST
|
||||
#undef SLIST_FOREACH
|
||||
#undef SLIST_FOREACH_PREVPTR
|
||||
#undef SLIST_HEAD
|
||||
#undef SLIST_HEAD_INITIALIZER
|
||||
#undef SLIST_INIT
|
||||
#undef SLIST_INSERT_AFTER
|
||||
#undef SLIST_INSERT_HEAD
|
||||
#undef SLIST_NEXT
|
||||
#undef SLIST_REMOVE
|
||||
#undef SLIST_REMOVE_HEAD
|
||||
#undef STAILQ_CONCAT
|
||||
#undef STAILQ_EMPTY
|
||||
#undef STAILQ_ENTRY
|
||||
#undef STAILQ_FIRST
|
||||
#undef STAILQ_FOREACH
|
||||
#undef STAILQ_HEAD
|
||||
#undef STAILQ_HEAD_INITIALIZER
|
||||
#undef STAILQ_INIT
|
||||
#undef STAILQ_INSERT_AFTER
|
||||
#undef STAILQ_INSERT_HEAD
|
||||
#undef STAILQ_INSERT_TAIL
|
||||
#undef STAILQ_LAST
|
||||
#undef STAILQ_NEXT
|
||||
#undef STAILQ_REMOVE
|
||||
#undef STAILQ_REMOVE_HEAD
|
||||
#undef STAILQ_REMOVE_HEAD_UNTIL
|
||||
#undef TAILQ_CONCAT
|
||||
#undef TAILQ_EMPTY
|
||||
#undef TAILQ_ENTRY
|
||||
#undef TAILQ_FIRST
|
||||
#undef TAILQ_FOREACH
|
||||
#undef TAILQ_FOREACH_REVERSE
|
||||
#undef TAILQ_HEAD
|
||||
#undef TAILQ_HEAD_INITIALIZER
|
||||
#undef TAILQ_INIT
|
||||
#undef TAILQ_INSERT_AFTER
|
||||
#undef TAILQ_INSERT_BEFORE
|
||||
#undef TAILQ_INSERT_HEAD
|
||||
#undef TAILQ_INSERT_TAIL
|
||||
#undef TAILQ_LAST
|
||||
#undef TAILQ_NEXT
|
||||
#undef TAILQ_PREV
|
||||
#undef TAILQ_REMOVE
|
||||
#undef TRACEBUF
|
||||
#undef TRASHIT
|
||||
|
||||
#define QUEUE_MACRO_DEBUG 0
|
||||
#if QUEUE_MACRO_DEBUG
|
||||
/* Store the last 2 places the queue element or head was altered */
|
||||
struct qm_trace {
|
||||
char * lastfile;
|
||||
int lastline;
|
||||
char * prevfile;
|
||||
int prevline;
|
||||
};
|
||||
|
||||
#define TRACEBUF struct qm_trace trace;
|
||||
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
|
||||
|
||||
#define QMD_TRACE_HEAD(head) do { \
|
||||
(head)->trace.prevline = (head)->trace.lastline; \
|
||||
(head)->trace.prevfile = (head)->trace.lastfile; \
|
||||
(head)->trace.lastline = __LINE__; \
|
||||
(head)->trace.lastfile = __FILE__; \
|
||||
} while (0)
|
||||
|
||||
#define QMD_TRACE_ELEM(elem) do { \
|
||||
(elem)->trace.prevline = (elem)->trace.lastline; \
|
||||
(elem)->trace.prevfile = (elem)->trace.lastfile; \
|
||||
(elem)->trace.lastline = __LINE__; \
|
||||
(elem)->trace.lastfile = __FILE__; \
|
||||
} while (0)
|
||||
|
||||
#else
|
||||
#define QMD_TRACE_ELEM(elem)
|
||||
#define QMD_TRACE_HEAD(head)
|
||||
#define TRACEBUF
|
||||
#define TRASHIT(x)
|
||||
#endif /* QUEUE_MACRO_DEBUG */
|
||||
|
||||
/*
|
||||
* Singly-linked List declarations.
|
||||
*/
|
||||
#define SLIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *slh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#define SLIST_HEAD_INITIALIZER(head) \
|
||||
{ NULL }
|
||||
|
||||
#define SLIST_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *sle_next; /* next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Singly-linked List functions.
|
||||
*/
|
||||
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
|
||||
|
||||
#define SLIST_FIRST(head) ((head)->slh_first)
|
||||
|
||||
#define SLIST_FOREACH(var, head, field) \
|
||||
for ((var) = SLIST_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
|
||||
for ((varp) = &SLIST_FIRST((head)); \
|
||||
((var) = *(varp)) != NULL; \
|
||||
(varp) = &SLIST_NEXT((var), field))
|
||||
|
||||
#define SLIST_INIT(head) do { \
|
||||
SLIST_FIRST((head)) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
|
||||
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
|
||||
SLIST_NEXT((slistelm), field) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_INSERT_HEAD(head, elm, field) do { \
|
||||
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
|
||||
SLIST_FIRST((head)) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
|
||||
|
||||
#define SLIST_REMOVE(head, elm, type, field) do { \
|
||||
if (SLIST_FIRST((head)) == (elm)) { \
|
||||
SLIST_REMOVE_HEAD((head), field); \
|
||||
} \
|
||||
else { \
|
||||
struct type *curelm = SLIST_FIRST((head)); \
|
||||
while (SLIST_NEXT(curelm, field) != (elm)) \
|
||||
curelm = SLIST_NEXT(curelm, field); \
|
||||
SLIST_NEXT(curelm, field) = \
|
||||
SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define SLIST_REMOVE_HEAD(head, field) do { \
|
||||
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Singly-linked Tail queue declarations.
|
||||
*/
|
||||
#define STAILQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *stqh_first;/* first element */ \
|
||||
struct type **stqh_last;/* addr of last next element */ \
|
||||
}
|
||||
|
||||
#define STAILQ_HEAD_INITIALIZER(head) \
|
||||
{ NULL, &(head).stqh_first }
|
||||
|
||||
#define STAILQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *stqe_next; /* next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Singly-linked Tail queue functions.
|
||||
*/
|
||||
#define STAILQ_CONCAT(head1, head2) do { \
|
||||
if (!STAILQ_EMPTY((head2))) { \
|
||||
*(head1)->stqh_last = (head2)->stqh_first; \
|
||||
(head1)->stqh_last = (head2)->stqh_last; \
|
||||
STAILQ_INIT((head2)); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
|
||||
|
||||
#define STAILQ_FIRST(head) ((head)->stqh_first)
|
||||
|
||||
#define STAILQ_FOREACH(var, head, field) \
|
||||
for ((var) = STAILQ_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = STAILQ_NEXT((var), field))
|
||||
|
||||
#define STAILQ_INIT(head) do { \
|
||||
STAILQ_FIRST((head)) = NULL; \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
|
||||
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
STAILQ_NEXT((tqelm), field) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
|
||||
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
STAILQ_FIRST((head)) = (elm); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
|
||||
STAILQ_NEXT((elm), field) = NULL; \
|
||||
*(head)->stqh_last = (elm); \
|
||||
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_LAST(head, type, field) \
|
||||
(STAILQ_EMPTY((head)) ? \
|
||||
NULL : \
|
||||
((struct type *) \
|
||||
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
|
||||
|
||||
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
|
||||
|
||||
#define STAILQ_REMOVE(head, elm, type, field) do { \
|
||||
if (STAILQ_FIRST((head)) == (elm)) { \
|
||||
STAILQ_REMOVE_HEAD((head), field); \
|
||||
} \
|
||||
else { \
|
||||
struct type *curelm = STAILQ_FIRST((head)); \
|
||||
while (STAILQ_NEXT(curelm, field) != (elm)) \
|
||||
curelm = STAILQ_NEXT(curelm, field); \
|
||||
if ((STAILQ_NEXT(curelm, field) = \
|
||||
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
|
||||
(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_REMOVE_HEAD(head, field) do { \
|
||||
if ((STAILQ_FIRST((head)) = \
|
||||
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
|
||||
if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
|
||||
(head)->stqh_last = &STAILQ_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* List declarations.
|
||||
*/
|
||||
#define LIST_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *lh_first; /* first element */ \
|
||||
}
|
||||
|
||||
#define LIST_HEAD_INITIALIZER(head) \
|
||||
{ NULL }
|
||||
|
||||
#define LIST_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *le_next; /* next element */ \
|
||||
struct type **le_prev; /* address of previous next element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* List functions.
|
||||
*/
|
||||
|
||||
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
|
||||
|
||||
#define LIST_FIRST(head) ((head)->lh_first)
|
||||
|
||||
#define LIST_FOREACH(var, head, field) \
|
||||
for ((var) = LIST_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = LIST_NEXT((var), field))
|
||||
|
||||
#define LIST_INIT(head) do { \
|
||||
LIST_FIRST((head)) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
|
||||
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
|
||||
LIST_NEXT((listelm), field)->field.le_prev = \
|
||||
&LIST_NEXT((elm), field); \
|
||||
LIST_NEXT((listelm), field) = (elm); \
|
||||
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
|
||||
(elm)->field.le_prev = (listelm)->field.le_prev; \
|
||||
LIST_NEXT((elm), field) = (listelm); \
|
||||
*(listelm)->field.le_prev = (elm); \
|
||||
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_INSERT_HEAD(head, elm, field) do { \
|
||||
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
|
||||
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
|
||||
LIST_FIRST((head)) = (elm); \
|
||||
(elm)->field.le_prev = &LIST_FIRST((head)); \
|
||||
} while (0)
|
||||
|
||||
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
|
||||
|
||||
#define LIST_REMOVE(elm, field) do { \
|
||||
if (LIST_NEXT((elm), field) != NULL) \
|
||||
LIST_NEXT((elm), field)->field.le_prev = \
|
||||
(elm)->field.le_prev; \
|
||||
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Tail queue declarations.
|
||||
*/
|
||||
#define TAILQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *tqh_first; /* first element */ \
|
||||
struct type **tqh_last; /* addr of last next element */ \
|
||||
TRACEBUF \
|
||||
}
|
||||
|
||||
#define TAILQ_HEAD_INITIALIZER(head) \
|
||||
{ NULL, &(head).tqh_first }
|
||||
|
||||
#define TAILQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *tqe_next; /* next element */ \
|
||||
struct type **tqe_prev; /* address of previous next element */ \
|
||||
TRACEBUF \
|
||||
}
|
||||
|
||||
/*
|
||||
* Tail queue functions.
|
||||
*/
|
||||
#define TAILQ_CONCAT(head1, head2, field) do { \
|
||||
if (!TAILQ_EMPTY(head2)) { \
|
||||
*(head1)->tqh_last = (head2)->tqh_first; \
|
||||
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
|
||||
(head1)->tqh_last = (head2)->tqh_last; \
|
||||
TAILQ_INIT((head2)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_HEAD(head2); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
|
||||
|
||||
#define TAILQ_FIRST(head) ((head)->tqh_first)
|
||||
|
||||
#define TAILQ_FOREACH(var, head, field) \
|
||||
for ((var) = TAILQ_FIRST((head)); \
|
||||
(var); \
|
||||
(var) = TAILQ_NEXT((var), field))
|
||||
|
||||
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
|
||||
for ((var) = TAILQ_LAST((head), headname); \
|
||||
(var); \
|
||||
(var) = TAILQ_PREV((var), headname, field))
|
||||
|
||||
#define TAILQ_INIT(head) do { \
|
||||
TAILQ_FIRST((head)) = NULL; \
|
||||
(head)->tqh_last = &TAILQ_FIRST((head)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
|
||||
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev = \
|
||||
&TAILQ_NEXT((elm), field); \
|
||||
else { \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} \
|
||||
TAILQ_NEXT((listelm), field) = (elm); \
|
||||
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
QMD_TRACE_ELEM(&listelm->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
|
||||
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
|
||||
TAILQ_NEXT((elm), field) = (listelm); \
|
||||
*(listelm)->field.tqe_prev = (elm); \
|
||||
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
QMD_TRACE_ELEM(&listelm->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
|
||||
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
|
||||
TAILQ_FIRST((head))->field.tqe_prev = \
|
||||
&TAILQ_NEXT((elm), field); \
|
||||
else \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
TAILQ_FIRST((head)) = (elm); \
|
||||
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
|
||||
TAILQ_NEXT((elm), field) = NULL; \
|
||||
(elm)->field.tqe_prev = (head)->tqh_last; \
|
||||
*(head)->tqh_last = (elm); \
|
||||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
#define TAILQ_LAST(head, headname) \
|
||||
(*(((struct headname *)((head)->tqh_last))->tqh_last))
|
||||
|
||||
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
|
||||
|
||||
#define TAILQ_PREV(elm, headname, field) \
|
||||
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
|
||||
|
||||
#define TAILQ_REMOVE(head, elm, field) do { \
|
||||
if ((TAILQ_NEXT((elm), field)) != NULL) \
|
||||
TAILQ_NEXT((elm), field)->field.tqe_prev = \
|
||||
(elm)->field.tqe_prev; \
|
||||
else { \
|
||||
(head)->tqh_last = (elm)->field.tqe_prev; \
|
||||
QMD_TRACE_HEAD(head); \
|
||||
} \
|
||||
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
|
||||
TRASHIT((elm)->field.tqe_next); \
|
||||
TRASHIT((elm)->field.tqe_prev); \
|
||||
QMD_TRACE_ELEM(&(elm)->field); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Circular queue definitions.
|
||||
*/
|
||||
#define CIRCLEQ_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type *cqh_first; /* first element */ \
|
||||
struct type *cqh_last; /* last element */ \
|
||||
}
|
||||
|
||||
#define CIRCLEQ_HEAD_INITIALIZER(head) \
|
||||
{ (void *)&head, (void *)&head }
|
||||
|
||||
#define CIRCLEQ_ENTRY(type) \
|
||||
struct { \
|
||||
struct type *cqe_next; /* next element */ \
|
||||
struct type *cqe_prev; /* previous element */ \
|
||||
}
|
||||
|
||||
/*
|
||||
* Circular queue functions.
|
||||
*/
|
||||
#define CIRCLEQ_INIT(head) do { \
|
||||
(head)->cqh_first = (void *)(head); \
|
||||
(head)->cqh_last = (void *)(head); \
|
||||
} while (/*CONSTCOND*/0)
|
||||
|
||||
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
|
||||
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
|
||||
(elm)->field.cqe_prev = (listelm); \
|
||||
if ((listelm)->field.cqe_next == (void *)(head)) \
|
||||
(head)->cqh_last = (elm); \
|
||||
else \
|
||||
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
|
||||
(listelm)->field.cqe_next = (elm); \
|
||||
} while (/*CONSTCOND*/0)
|
||||
|
||||
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
|
||||
(elm)->field.cqe_next = (listelm); \
|
||||
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
|
||||
if ((listelm)->field.cqe_prev == (void *)(head)) \
|
||||
(head)->cqh_first = (elm); \
|
||||
else \
|
||||
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
|
||||
(listelm)->field.cqe_prev = (elm); \
|
||||
} while (/*CONSTCOND*/0)
|
||||
|
||||
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
|
||||
(elm)->field.cqe_next = (head)->cqh_first; \
|
||||
(elm)->field.cqe_prev = (void *)(head); \
|
||||
if ((head)->cqh_last == (void *)(head)) \
|
||||
(head)->cqh_last = (elm); \
|
||||
else \
|
||||
(head)->cqh_first->field.cqe_prev = (elm); \
|
||||
(head)->cqh_first = (elm); \
|
||||
} while (/*CONSTCOND*/0)
|
||||
|
||||
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
|
||||
(elm)->field.cqe_next = (void *)(head); \
|
||||
(elm)->field.cqe_prev = (head)->cqh_last; \
|
||||
if ((head)->cqh_first == (void *)(head)) \
|
||||
(head)->cqh_first = (elm); \
|
||||
else \
|
||||
(head)->cqh_last->field.cqe_next = (elm); \
|
||||
(head)->cqh_last = (elm); \
|
||||
} while (/*CONSTCOND*/0)
|
||||
|
||||
#define CIRCLEQ_REMOVE(head, elm, field) do { \
|
||||
if ((elm)->field.cqe_next == (void *)(head)) \
|
||||
(head)->cqh_last = (elm)->field.cqe_prev; \
|
||||
else \
|
||||
(elm)->field.cqe_next->field.cqe_prev = \
|
||||
(elm)->field.cqe_prev; \
|
||||
if ((elm)->field.cqe_prev == (void *)(head)) \
|
||||
(head)->cqh_first = (elm)->field.cqe_next; \
|
||||
else \
|
||||
(elm)->field.cqe_prev->field.cqe_next = \
|
||||
(elm)->field.cqe_next; \
|
||||
} while (/*CONSTCOND*/0)
|
||||
|
||||
#define CIRCLEQ_FOREACH(var, head, field) \
|
||||
for ((var) = ((head)->cqh_first); \
|
||||
(var) != (const void *)(head); \
|
||||
(var) = ((var)->field.cqe_next))
|
||||
|
||||
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
|
||||
for ((var) = ((head)->cqh_last); \
|
||||
(var) != (const void *)(head); \
|
||||
(var) = ((var)->field.cqe_prev))
|
||||
|
||||
/*
|
||||
* Circular queue access methods.
|
||||
*/
|
||||
#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
|
||||
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
|
||||
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
|
||||
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
|
||||
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
|
||||
|
||||
#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
|
||||
(((elm)->field.cqe_next == (void *)(head)) \
|
||||
? ((head)->cqh_first) \
|
||||
: (elm->field.cqe_next))
|
||||
#define CIRCLEQ_LOOP_PREV(head, elm, field) \
|
||||
(((elm)->field.cqe_prev == (void *)(head)) \
|
||||
? ((head)->cqh_last) \
|
||||
: (elm->field.cqe_prev))
|
||||
|
||||
|
||||
#if defined(__cplusplus)
|
||||
}
|
||||
#endif
|
||||
#endif /* !_DB_QUEUE_H_ */
|
305	c_src/wterl.c

@@ -21,56 +21,47 @@
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <inttypes.h>
#include <errno.h>

#ifdef DEBUG
#include <stdio.h>
#include <stdarg.h>
#define dprint(s, ...) do {                     \
    fprintf(stderr, s, ##__VA_ARGS__);          \
    fprintf(stderr, "\r\n");                    \
    fflush(stderr);                             \
} while(0);
#else
#  define dprint(s, ...) {}
#endif

#ifndef UNUSED
#define UNUSED(v) ((void)(v))
#endif

#include "common.h"
#include "wiredtiger.h"
#include "async_nif.h"
#include "khash.h"

#ifdef WTERL_STATS
#include "stats.h"
#endif

#if (ASYNC_NIF_MAX_WORKERS > 32768)
#error "WterlCtx cache won't work properly with > 32,768 workers."
#endif
#define CTX_CACHE_SIZE ASYNC_NIF_MAX_WORKERS

static ErlNifResourceType *wterl_conn_RESOURCE;
static ErlNifResourceType *wterl_ctx_RESOURCE;
static ErlNifResourceType *wterl_cursor_RESOURCE;

/* Generators for named, type-specific hash table functions. */
KHASH_MAP_INIT_STR(uri, unsigned int); // URI -> number of cursors(URI)
typedef struct WterlCtxHandle {
    WT_SESSION *session;  // open session
    WT_CURSOR *cursors[]; // open cursors, all reset, ready to reuse
} WterlCtxHandle;

union {
    unsigned int hash[];
    struct {
        unsigned int nest:2;   // cuckoo's nest chosen on hash collision
        unsigned int off:15;   // bitpop((bmp & (1 << off) - 1) & bmp)
        unsigned int depth:10;
    } nests;
} cuckoo;
struct ctx_lru_entry {
    WterlCtxHandle *ctx;
    u_int64_t sig;
    SLIST_HEAD(ctx, struct WterlCtxHandle*) set;
    STAILQ_ENTRY(struct ctx_lru_entry) entries;
};

KHASH_SET_INIT_INT64(ctx_idx, struct ctx_group*);

typedef struct {
    WT_SESSION *session;
    WT_CURSOR *cursor;
} WterlCtx;
struct ctx_cache {
    size_t size;
    struct lru {
        STAILQ_HEAD(lru, struct ctx_lru_entry*) lru;
    } lru;
    struct idx {
        int h;
        ctx_group cgs[CTX_CACHE_SIZE];
        khash_t(ctx_idx) *ctx;
    } idx;
};

typedef struct {
    WT_CONNECTION *conn;
@@ -127,6 +118,213 @@ struct wterl_priv_data {
ASYNC_NIF_INIT(wterl);


/**
 * Is the cache full?
 *
 * Test to see if the cache is full or not.
 *
 * -> 0=false/not full yet, 1=true/cache is full
 */
static int
__ctx_cache_full(struct ctx_cache *cache)
{
    return cache->size == CTX_CACHE_SIZE ? 1 : 0;
}

/**
 * Evict items from the cache.
 *
 * Evict some number of items from the cache to make space for other items.
 *
 * -> number of items evicted
 */
static int
__ctx_cache_evict(struct ctx_cache *cache)
{
    // TODO: eviction policy not implemented yet
    return 0;
}

/**
 * Find a matching item in the cache.
 *
 * See if there exists an item in the cache with a matching signature, if
 * so remove it from the cache and return it for use by the callee.
 *
 * sig  a 32-bit signature (hash) representing the session/cursor* needed
 *      for the operation
 */
static WterlCtxHandle *
__ctx_cache_find(struct ctx_cache *cache, u_int64_t sig)
{
    khiter_t k;

    k = kh_get(ctx_idx, cache->idx.ctx, sig);
    if (k != kh_end(cache->idx.ctx)) {
        /*
         * This signature exists in the hashtable, that's good news. Maybe
         * there is a context open and ready for us to reuse, let's check.
         */
        struct ctx_group *cg = kh_value(cache->idx.ctx, k);
        if (SLIST_EMPTY(cg->set)) { // cache miss
            /*
             * Nope, there are no contexts available for reuse with this
             * signature.
             */
            return NULL;
        } else { // cache hit
            /*
             * Yes, we've found a context available for reuse with the
             * desired signature.  Remove it from the cache and return it
             * to the caller.
             */
            WterlCtxHandle *p = SLIST_FIRST(cg->set);
            SLIST_REMOVE_HEAD(cg->set, entries);             // remove from index
            struct ctx_lru_entry *q;
            STAILQ_FOREACH(q, &cache->lru, entries) {
                if (q->ctx == p) {
                    STAILQ_REMOVE(&cache->lru, q, ctx_lru_entry, entries); // remove from lru
                    break;
                }
            }
            cache->size--; // update cache size
            return p;
        }
    } else {
        /*
         * The signature didn't match any that we're caching contexts for right
         * now, so clearly there won't be any cached contexts for this either.
         */
        return NULL;
    }
}

/**
 * Add/Return an item to the cache.
 *
 * Return an item into the cache, reset the cursors it has open and put it at
 * the front of the LRU.
 */
static int
__ctx_cache_add(struct ctx_cache *cache, WterlCtxHandle *e)
{
    khiter_t k;

    /* WIP: 'sig' is not yet passed in here and the new-group branch below is unfinished. */
    k = kh_get(ctx_idx, cache->idx.ctx, sig);
    if (k != kh_end(cache->idx.ctx)) {
        /*
         * This signature exists in the bitmap, that's good news. We can just
         * put this into the list of cached items for that signature.
         */
        struct ctx_group *cg = kh_value(cache->idx.ctx, k);
        SLIST_INSERT_HEAD(cg->set, e, entries);      // add to index
        STAILQ_INSERT_HEAD(&cache->lru, e, entries); // add to lru
        cache->size++;                               // update cache size
    } else {
        /*
         * The signature didn't match any that we're caching contexts for right
         * now, so we need to add a context group for it.
         */
        if (cache->idx
        return NULL;
    }
}
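For orientation, here is a hedged sketch of how the find/add pair above is presumably meant to be used by a request once the remaining WIP pieces land. __open_new_context and the configuration strings are placeholders for illustration, not functions or values from this commit.

static int
do_op_with_cached_ctx(struct ctx_cache *cache, const char *session_config,
                      const char *table_uri, const char *cursor_config)
{
    u_int64_t sig = __ctx_cache_sig(session_config, 2, table_uri, cursor_config);
    WterlCtxHandle *ctx = __ctx_cache_find(cache, sig);

    if (ctx == NULL) {
        /* miss: no idle context with this exact session/cursor shape */
        ctx = __open_new_context(session_config, table_uri, cursor_config); /* placeholder */
        if (ctx == NULL)
            return ENOMEM;
    }

    /* ... perform the operation using ctx->session and ctx->cursors[] ... */

    __ctx_cache_add(cache, ctx);   /* hand it back: cursors reset, context at the LRU front */
    return 0;
}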
/**
 * Produce the Morton Number from two 32-bit unsigned integers.
 *   e.g.  p = 0101 1011 0100 0011
 *         q = 1011 1100 0001 0011
 *         z = 0110 0111 1101 1010 0010 0001 0000 1111
 */
static inline u_int64_t
__interleave(u_int32_t p, u_int32_t q)
{
    static const u_int32_t B[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF};
    static const u_int32_t S[] = {1, 2, 4, 8};
    u_int32_t x, y;
    u_int64_t z;

    x = p & 0x0000FFFF;  // Interleave lower 16 bits of p as x and q as y, so the
    y = q & 0x0000FFFF;  // bits of x are in the even positions and bits from y
    z = 0;               // in the odd; the first 32 bits of 'z' is the result.

    x = (x | (x << S[3])) & B[3];
    x = (x | (x << S[2])) & B[2];
    x = (x | (x << S[1])) & B[1];
    x = (x | (x << S[0])) & B[0];

    y = (y | (y << S[3])) & B[3];
    y = (y | (y << S[2])) & B[2];
    y = (y | (y << S[1])) & B[1];
    y = (y | (y << S[0])) & B[0];

    z = x | (y << 1);

    x = (p >> 16) & 0x0000FFFF;  // Interleave the upper 16 bits of p as x and q as y
    y = (q >> 16) & 0x0000FFFF;  // just as before.

    x = (x | (x << S[3])) & B[3];
    x = (x | (x << S[2])) & B[2];
    x = (x | (x << S[1])) & B[1];
    x = (x | (x << S[0])) & B[0];

    y = (y | (y << S[3])) & B[3];
    y = (y | (y << S[2])) & B[2];
    y = (y | (y << S[1])) & B[1];
    y = (y | (y << S[0])) & B[0];

    z |= ((u_int64_t)(x | (y << 1))) << 32;  // upper halves fill the high 32 bits: the 64-bit Morton Number.

    return z;
}
/**
 * A string hash function.
 *
 * A basic hash function for strings of characters used during the
 * affinity association.
 *
 * s    a NULL terminated set of bytes to be hashed
 * ->   an integer hash encoding of the bytes
 */
static inline unsigned int
__str_hash_func(const char *s)
{
    unsigned int h = (unsigned int)*s;
    if (h) for (++s ; *s; ++s) h = (h << 5) - h + (unsigned int)*s;
    return h;
}

/**
 * Create a signature for the operation we're about to perform.
 *
 * Create a 32-bit signature for a combination of session configuration and
 * some number of cursors open on tables, each potentially with a different
 * configuration: "session_config, [{table_name, cursor_config}, ...]"
 *
 * session_config  the string used to configure the WT_SESSION
 * count           the number of configuration strings that follow (a table
 *                 name and its cursor config count as two)
 * ...             each pair of items in the varargs array is a table name,
 *                 cursor config pair
 */
static u_int32_t
__ctx_cache_sig(const char *session_config, int count, ...)
{
    va_list ap;
    int i;
    u_int64_t h;

    if (NULL == session_config)
        return 0;

    h = __str_hash_func(session_config);

    va_start(ap, count);
    for (i = 0; i < count; i++) {
        h = __interleave(h, __str_hash_func(va_arg(ap, const char *)));
        h <<= 1;
    }
    va_end(ap);
    return (u_int32_t)(h & 0xFFFFFFFF);
}
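As a concrete, illustrative example of the signature (the configuration strings below are examples, not taken from the commit): a worker that wants a session configured with isolation=snapshot plus one cursor on table:users opened with overwrite=false would derive its cache key as

    u_int32_t sig = __ctx_cache_sig("isolation=snapshot", 2,
                                    "table:users", "overwrite=false");

Two requests that need the same session and cursor configurations hash to the same signature and can therefore share a cached WterlCtxHandle; any difference in the strings changes the interleaved hash and keeps them apart.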

/**
 * Callback to handle error messages.
 *
@@ -269,6 +467,24 @@ __init_session_and_cursor_cache(WterlConnHandle *conn_handle, WterlCtx *ctx)
    return 0;
}

/**
 * Get the per-worker reusable WT_SESSION for a worker_id.
 */
static int
__session_for(WterlConnHandle *conn_handle, unsigned int worker_id, WT_SESSION **session)
{
    WterlCtx *ctx = &conn_handle->contexts[worker_id];
    int rc = 0;

    if (ctx->session == NULL) {
        enif_mutex_lock(conn_handle->contexts_mutex);
        rc = __init_session_and_cursor_cache(conn_handle, ctx);
        enif_mutex_unlock(conn_handle->contexts_mutex);
    }
    *session = ctx->session;
    return rc;
}

/**
 * Close all sessions and all cursors open on any objects.
 *
@@ -329,23 +545,6 @@ __close_cursors_on(WterlConnHandle *conn_handle, const char *uri)
    }
}

/**
 * A string hash function.
 *
 * A basic hash function for strings of characters used during the
 * affinity association.
 *
 * s    a NULL terminated set of bytes to be hashed
 * ->   an integer hash encoding of the bytes
 */
static inline unsigned int
__str_hash_func(const char *s)
{
    unsigned int h = (unsigned int)*s;
    if (h) for (++s ; *s; ++s) h = (h << 5) - h + (unsigned int)*s;
    return h;
}

/**
 * Get a reusable cursor that was opened for a particular worker within its
 * session.