fix casts; add missing BEGIN_C_DECLS; replace designated initializers with positional initializers; libstasis.so now builds + runs under both g++ and gcc!
parent e35d078dde
commit 3d707c71b6

53 changed files with 444 additions and 353 deletions
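The whole change reduces to one portability theme: the same C sources must now also be accepted by a C++ compiler. A minimal sketch of the three patterns the diff applies below; the names example_c_function, thing_t, and ops are illustrative, and the macro bodies are the usual autoconf idiom, assumed here rather than copied from stasis/common.h:

    #include <stdlib.h>

    /* (1) Name mangling: C declarations need extern "C" guards when the
     *     header is included from C++. */
    #ifdef __cplusplus
    # define BEGIN_C_DECLS extern "C" {
    # define END_C_DECLS   }
    #else
    # define BEGIN_C_DECLS
    # define END_C_DECLS
    #endif

    BEGIN_C_DECLS
    int example_c_function(int x);   /* callable from C and C++ alike */
    END_C_DECLS

    /* (2) void* conversions: implicit in C, an error in C++, so results of
     *     malloc()-like and rblookup()-like calls get explicit casts. */
    typedef struct { int field; } thing_t;
    static thing_t * make_thing(void) {
      return (thing_t *)malloc(sizeof(thing_t));  /* cast is a no-op in C */
    }

    /* (3) Designated initializers are C99-only (until C++20), so struct
     *     constants switch to positional form, keeping field names as
     *     comments. */
    struct ops { int (*open_fn)(int); int (*close_fn)(int); };
    static int my_open(int fd)  { return fd; }
    static int my_close(int fd) { return -fd; }
    static struct ops example_ops = {
      /*.open_fn  =*/ my_open,
      /*.close_fn =*/ my_close,
    };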
@@ -18,7 +18,7 @@
 <folderInfo id="cdt.managedbuild.toolchain.gnu.base.713831365.1489428146" name="/" resourcePath="">
   <toolChain id="cdt.managedbuild.toolchain.gnu.base.930492925" name="cdt.managedbuild.toolchain.gnu.base" superClass="cdt.managedbuild.toolchain.gnu.base">
     <targetPlatform archList="all" binaryParser="org.eclipse.cdt.core.ELF" id="cdt.managedbuild.target.gnu.platform.base.1380865982" name="Debug Platform" osList="linux,hpux,aix,qnx" superClass="cdt.managedbuild.target.gnu.platform.base"/>
-    <builder buildPath="${workspace_loc:/stasis/build/}" id="cdt.managedbuild.target.gnu.builder.base.386827070" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" parallelBuildOn="true" parallelizationNumber="optimal" superClass="cdt.managedbuild.target.gnu.builder.base"/>
+    <builder buildPath="${workspace_loc:/stasis/build/}" id="cdt.managedbuild.target.gnu.builder.base.386827070" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" parallelBuildOn="true" parallelizationNumber="optimal" stopOnErr="false" superClass="cdt.managedbuild.target.gnu.builder.base"/>
     <tool id="cdt.managedbuild.tool.gnu.archiver.base.788388724" name="GCC Archiver" superClass="cdt.managedbuild.tool.gnu.archiver.base"/>
     <tool id="cdt.managedbuild.tool.gnu.cpp.compiler.base.761900506" name="GCC C++ Compiler" superClass="cdt.managedbuild.tool.gnu.cpp.compiler.base"/>
     <tool id="cdt.managedbuild.tool.gnu.c.compiler.base.1946158569" name="GCC C Compiler" superClass="cdt.managedbuild.tool.gnu.c.compiler.base">
@@ -14,6 +14,8 @@
 #include <stasis/common.h>
 #include <stdio.h>
 
+BEGIN_C_DECLS
+
 struct pblHashTable_t;
 typedef struct pblHashTable_t pblHashTable_t;
 
@@ -29,6 +31,8 @@ void * pblHtNext ( pblHashTable_t * h );
 void * pblHtCurrent ( pblHashTable_t * h );
 void * pblHtCurrentKey ( pblHashTable_t * h );
 
+END_C_DECLS
+
 #else
 #error pbl.h was included, but PBL_COMPAT is not defined!
 #endif
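Why the guards matter for a mixed gcc/g++ build: without extern "C", a C++ caller of pbl.h emits a mangled symbol reference, while the gcc-compiled libstasis.so exports the plain C name, so linking (or dlopen) fails with an undefined symbol. A hypothetical illustration; the mangled spelling below assumes the Itanium C++ ABI:

    /* A C++ translation unit without the guards would reference:
     *   _Z12pblHtCurrentP14pblHashTable_t     (mangled)
     * while the C-compiled library exports:
     *   pblHtCurrent                          (unmangled)
     * With BEGIN_C_DECLS / END_C_DECLS in place, both sides agree on
     * the unmangled name:
     */
    #ifdef __cplusplus
    extern "C" {
    #endif
    void * pblHtCurrent ( pblHashTable_t * h );
    #ifdef __cplusplus
    }
    #endif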
@@ -167,7 +167,7 @@ static int pageOwners_remove(stasis_allocation_policy_t *ap, pageid_t pageid) {
 int pageOwners_lookup_by_xid_freespace(stasis_allocation_policy_t *ap, int xid, size_t freespace, pageid_t* pageid) {
   pageOwners_xid_freespace_pageid query = { xid, freespace, 0 };
   // find lowest numbered page w/ enough freespace.
-  const pageOwners_xid_freespace_pageid *tup = rblookup(RB_LUGTEQ, &query, ap->pageOwners_key_xid_freespace_pageid);
+  const pageOwners_xid_freespace_pageid *tup = (const pageOwners_xid_freespace_pageid *)rblookup(RB_LUGTEQ, &query, ap->pageOwners_key_xid_freespace_pageid);
   if(tup && tup->xid == xid) {
     assert(tup->freespace >= freespace);
     *pageid = tup->pageid;
@@ -178,7 +178,7 @@ int pageOwners_lookup_by_xid_freespace(stasis_allocation_policy_t *ap, int xid,
 }
 int pageOwners_lookup_by_pageid(stasis_allocation_policy_t* ap, pageid_t pageid, int *xid, size_t *freespace) {
   const pageOwners_xid_freespace_pageid query = { 0, 0, pageid };
-  const pageOwners_xid_freespace_pageid *tup = rbfind(&query, ap->pageOwners_key_pageid);
+  const pageOwners_xid_freespace_pageid *tup = (const pageOwners_xid_freespace_pageid *)rbfind(&query, ap->pageOwners_key_pageid);
   if(tup) {
     *xid = tup->xid;
     *freespace = tup->freespace;
@@ -192,7 +192,7 @@ int pageOwners_lookup_by_pageid(stasis_allocation_policy_t* ap, pageid_t pageid,
 // ######## AllPages #############
 static int allPages_lookup_by_pageid(stasis_allocation_policy_t *ap, pageid_t pageid, size_t *freespace) {
   allPages_pageid_freespace query = {pageid, 0};
-  const allPages_pageid_freespace * tup = rbfind(&query, ap->allPages_key_pageid);
+  const allPages_pageid_freespace * tup = (const allPages_pageid_freespace *)rbfind(&query, ap->allPages_key_pageid);
   if(tup) {
     assert(tup->pageid == pageid);
     *freespace = tup->freespace;
@@ -224,7 +224,7 @@ static int allPages_remove(stasis_allocation_policy_t *ap, pageid_t pageid) {
 }
 static void allPages_removeAll(stasis_allocation_policy_t *ap) {
   const allPages_pageid_freespace * tup;
-  while((tup = rbmin(ap->allPages_key_pageid))) {
+  while((tup = (const allPages_pageid_freespace *)rbmin(ap->allPages_key_pageid))) {
     allPages_remove(ap, tup->pageid);
   }
 }
@@ -249,7 +249,7 @@ static void allPages_set_freespace(stasis_allocation_policy_t *ap, pageid_t page
 }
 static int xidAllocedDealloced_helper_lookup_by_xid(struct rbtree *t, int xid, pageid_t **pages, size_t*count) {
   xidAllocedDealloced_xid_pageid query = {xid, 0};
-  const xidAllocedDealloced_xid_pageid *tup = rblookup(RB_LUGTEQ, &query, t);
+  const xidAllocedDealloced_xid_pageid *tup = (const xidAllocedDealloced_xid_pageid *)rblookup(RB_LUGTEQ, &query, t);
   int ret = 0;
   *pages = 0;
   *count = 0;
@@ -261,13 +261,13 @@ static int xidAllocedDealloced_helper_lookup_by_xid(struct rbtree *t, int xid, p
     // printf("pages %x count %x *pages %x len %lld \n", pages, count, *pages, *count * sizeof(*pages[0]));
     fflush(stdout);
     (*pages)[(*count) - 1] = tup->pageid;
-    tup = rblookup(RB_LUGREAT, tup, t);
+    tup = (const xidAllocedDealloced_xid_pageid *)rblookup(RB_LUGREAT, tup, t);
   }
   return ret;
 }
 static int xidAllocedDealloced_helper_lookup_by_pageid(struct rbtree *t, pageid_t pageid, int ** xids, size_t * count) {
   xidAllocedDealloced_xid_pageid query = {0, pageid};
-  const xidAllocedDealloced_xid_pageid *tup = rblookup(RB_LUGTEQ, &query, t);
+  const xidAllocedDealloced_xid_pageid *tup = (const xidAllocedDealloced_xid_pageid *)rblookup(RB_LUGTEQ, &query, t);
   int ret = 0;
   *xids = 0;
   *count = 0;
@@ -277,7 +277,7 @@ static int xidAllocedDealloced_helper_lookup_by_pageid(struct rbtree *t, pageid_
     (*count)++;
     *xids = stasis_realloc(*xids, *count, int);
     (*xids)[(*count) - 1] = tup->xid;
-    tup = rblookup(RB_LUGREAT, tup, t);
+    tup = (const xidAllocedDealloced_xid_pageid *)rblookup(RB_LUGREAT, tup, t);
   }
   return ret;
 }
@@ -370,7 +370,8 @@ static int availablePages_cmp_pageid(const void *ap, const void *bp) {
 #else
 static int availablePages_cmp_pageid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const availablePages_pageid_freespace *a = ap, *b = bp;
+  const availablePages_pageid_freespace *a = (const availablePages_pageid_freespace *)ap;
+  const availablePages_pageid_freespace *b = (const availablePages_pageid_freespace *)bp;
   return (a->pageid < b->pageid) ? -1 :
     ((a->pageid > b->pageid) ? 1 :
      (0));
@@ -380,7 +381,8 @@ static int availablePages_cmp_freespace_pageid(const void *ap, const void *bp) {
 #else
 static int availablePages_cmp_freespace_pageid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const availablePages_pageid_freespace *a = ap, *b = bp;
+  const availablePages_pageid_freespace *a = (const availablePages_pageid_freespace *)ap;
+  const availablePages_pageid_freespace *b = (const availablePages_pageid_freespace *)bp;
   int ret = (a->freespace < b->freespace) ? -1 :
     ((a->freespace > b->freespace) ? 1 :
      ((a->pageid < b->pageid) ? -1 :
@@ -390,7 +392,7 @@ static int availablePages_cmp_freespace_pageid(const void *ap, const void *bp, c
 }
 int availablePages_lookup_by_freespace(stasis_allocation_policy_t *ap, size_t freespace, pageid_t *pageid) {
   const availablePages_pageid_freespace query = { 0, freespace };
-  const availablePages_pageid_freespace *tup = rblookup(RB_LUGTEQ, &query, ap->availablePages_key_freespace_pageid);
+  const availablePages_pageid_freespace *tup = (const availablePages_pageid_freespace *)rblookup(RB_LUGTEQ, &query, ap->availablePages_key_freespace_pageid);
   if(tup && tup->freespace >= freespace ) {
     *pageid = tup->pageid;
     return 1;
@@ -404,7 +406,8 @@ static int pageOwners_cmp_pageid(const void *ap, const void *bp) {
 #else
 static int pageOwners_cmp_pageid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const pageOwners_xid_freespace_pageid *a = ap, *b = bp;
+  const pageOwners_xid_freespace_pageid *a = (const pageOwners_xid_freespace_pageid *)ap;
+  const pageOwners_xid_freespace_pageid *b = (const pageOwners_xid_freespace_pageid *)bp;
   return (a->pageid < b->pageid) ? -1 :
     ((a->pageid > b->pageid) ? 1 : 0);
 }
@@ -413,7 +416,8 @@ static int pageOwners_cmp_xid_freespace_pageid(const void *ap, const void *bp) {
 #else
 static int pageOwners_cmp_xid_freespace_pageid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const pageOwners_xid_freespace_pageid *a = ap, *b = bp;
+  const pageOwners_xid_freespace_pageid *a = (const pageOwners_xid_freespace_pageid *)ap;
+  const pageOwners_xid_freespace_pageid *b = (const pageOwners_xid_freespace_pageid *)bp;
   return (a->xid < b->xid) ? -1 :
     ((a->xid > b->xid) ? 1 :
      ((a->freespace < b->freespace) ? -1 :
@@ -426,7 +430,8 @@ static int allPages_cmp_pageid(const void *ap, const void *bp) {
 #else
 static int allPages_cmp_pageid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const allPages_pageid_freespace *a = ap, *b = bp;
+  const allPages_pageid_freespace *a = (const allPages_pageid_freespace *)ap;
+  const allPages_pageid_freespace *b = (const allPages_pageid_freespace *)bp;
   return (a->pageid < b->pageid) ? -1 :
     ((a->pageid > b->pageid) ? 1 : 0);
 }
@@ -435,7 +440,8 @@ static int xidAllocedDealloced_cmp_pageid_xid(const void *ap, const void *bp) {
 #else
 static int xidAllocedDealloced_cmp_pageid_xid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const xidAllocedDealloced_xid_pageid *a = ap, *b = bp;
+  const xidAllocedDealloced_xid_pageid *a = (const xidAllocedDealloced_xid_pageid *)ap;
+  const xidAllocedDealloced_xid_pageid *b = (const xidAllocedDealloced_xid_pageid *)bp;
   return (a->pageid < b->pageid) ? -1 :
     ((a->pageid > b->pageid) ? 1 :
      ((a->xid < b->xid) ? -1 :
@@ -446,7 +452,8 @@ static int xidAllocedDealloced_cmp_xid_pageid(const void *ap, const void *bp) {
 #else
 static int xidAllocedDealloced_cmp_xid_pageid(const void *ap, const void *bp, const void* ign) {
 #endif
-  const xidAllocedDealloced_xid_pageid *a = ap, *b = bp;
+  const xidAllocedDealloced_xid_pageid *a = (const xidAllocedDealloced_xid_pageid *)ap;
+  const xidAllocedDealloced_xid_pageid *b = (const xidAllocedDealloced_xid_pageid *)bp;
   return (a->xid < b->xid) ? -1 :
     ((a->xid > b->xid) ? 1 :
      ((a->pageid < b->pageid) ? -1 :
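Nearly all of the churn in the file above is one rule: C silently converts void * (and const void *) to any object-pointer type, but C++ makes that conversion an error, and the red-black tree's rblookup()/rbfind()/rbmin() all return const void *. A self-contained sketch of the incompatibility; row_t and rblookup_stub() are stand-ins, not Stasis APIs:

    #include <stddef.h>

    typedef struct { int xid; size_t freespace; long pageid; } row_t;

    /* stand-in for rblookup(), which returns const void * */
    static const void * rblookup_stub(void) { return NULL; }

    static void demo(void) {
      /* Legal C, rejected by g++ ("invalid conversion from 'const void*'"): */
      /* const row_t *tup = rblookup_stub(); */

      /* Legal in both languages; the cast is a no-op when compiled as C: */
      const row_t *tup = (const row_t *)rblookup_stub();
      (void)tup;
    }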
@@ -9,6 +9,7 @@
 #include <stasis/bufferPool.h>
 #include <stasis/pageHandle.h>
 #include <stasis/flags.h>
 #include <stasis/bufferManager/concurrentBufferManager.h>
 
 //#define STRESS_TEST_WRITEBACK 1 // if defined, writeback as much as possible, as fast as possible.
 
@@ -4,6 +4,7 @@
 #include <stasis/bufferPool.h>
 #include <stasis/bufferManager/legacy/pageFile.h>
 #include <stasis/bufferManager/legacy/pageCache.h>
 #include <stasis/bufferManager/legacy/legacyBufferManager.h>
 
 #include <stasis/page.h>
 
@@ -22,13 +22,13 @@ typedef struct {
 
 static int dpt_cmp_page(const void *ap, const void * bp, const void * ignored) {
-  const dpt_entry * a = ap;
-  const dpt_entry * b = bp;
+  const dpt_entry * a = (const dpt_entry *)ap;
+  const dpt_entry * b = (const dpt_entry *)bp;
   return (a->p < b->p) ? -1 : ((a->p == b->p) ? 0 : 1);
 }
 static int dpt_cmp_lsn_and_page(const void *ap, const void * bp, const void * ignored) {
-  const dpt_entry * a = ap;
-  const dpt_entry * b = bp;
+  const dpt_entry * a = (const dpt_entry *)ap;
+  const dpt_entry * b = (const dpt_entry *)bp;
 
   return (a->lsn < b->lsn) ? -1 : ((a->lsn == b->lsn) ? dpt_cmp_page(ap, bp, 0) : 1);
 }
@@ -85,13 +85,13 @@ void stasis_dirty_page_table_set_clean(stasis_dirty_page_table_t * dirtyPages, P
   if(p->dirty) {
     dpt_entry dummy = {p->id, 0};
 
-    const dpt_entry * e = rbdelete(&dummy, dirtyPages->tableByPage);
+    const dpt_entry * e = (const dpt_entry *)rbdelete(&dummy, dirtyPages->tableByPage);
     assert(e);
     assert(e->p == p->id);
     dummy.lsn = e->lsn;
     free((void*)e);
 
-    e = rbdelete(&dummy, dirtyPages->tableByLsnAndPage);
+    e = (const dpt_entry *)rbdelete(&dummy, dirtyPages->tableByLsnAndPage);
     assert(e);
     assert(e->p == p->id);
     assert(e->lsn == dummy.lsn);
@@ -100,7 +100,7 @@ void stasis_dirty_page_table_set_clean(stasis_dirty_page_table_t * dirtyPages, P
     p->dirty = 0;
 
     lsn_t min_waiting = stasis_util_multiset_min(dirtyPages->outstanding_flush_lsns);
-    e = rbmin(dirtyPages->tableByLsnAndPage);
+    e = (const dpt_entry *)rbmin(dirtyPages->tableByLsnAndPage);
     if(dummy.lsn >= min_waiting &&
        (!e || e->lsn >= min_waiting)) {
       pthread_cond_broadcast( &dirtyPages->writebackCond );
@@ -129,7 +129,7 @@ int stasis_dirty_page_table_is_dirty(stasis_dirty_page_table_t * dirtyPages, Pag
 
 lsn_t stasis_dirty_page_table_minRecLSN(stasis_dirty_page_table_t * dirtyPages) {
   pthread_mutex_lock(&dirtyPages->mutex);
-  const dpt_entry * e = rbmin(dirtyPages->tableByLsnAndPage);
+  const dpt_entry * e = (const dpt_entry *)rbmin(dirtyPages->tableByLsnAndPage);
   lsn_t lsn = e ? e->lsn : LSN_T_MAX;
   pthread_mutex_unlock(&dirtyPages->mutex);
   return lsn;
@@ -175,9 +175,9 @@ int stasis_dirty_page_table_flush_with_target(stasis_dirty_page_table_t * dirtyP
     int off = 0;
     int strides = 0;
     all_flushed = 1;
-    for(const dpt_entry * e = rblookup(RB_LUGTEQ, &dummy, tree);
+    for(const dpt_entry * e = (const dpt_entry *)rblookup(RB_LUGTEQ, &dummy, tree);
         e && e->lsn < targetLsn;
-        e = rblookup(RB_LUGREAT, &dummy, tree)) {
+        e = (const dpt_entry *)rblookup(RB_LUGREAT, &dummy, tree)) {
       dummy = *e;
       vals[off] = dummy.p;
       off++;
@@ -287,9 +287,9 @@ int stasis_dirty_page_table_get_flush_candidates(stasis_dirty_page_table_t * dir
   dummy.lsn = -1;
   dummy.p = start;
 
-  for(const dpt_entry *e = rblookup(RB_LUGTEQ, &dummy, dirtyPages->tableByPage);
+  for(const dpt_entry *e = (const dpt_entry *)rblookup(RB_LUGTEQ, &dummy, dirtyPages->tableByPage);
      e && (stop == 0 || e->p < stop) && n < ATOMIC_READ_32(0, &count);
-     e = rblookup(RB_LUGREAT, e, dirtyPages->tableByPage)) {
+     e = (const dpt_entry *)rblookup(RB_LUGREAT, e, dirtyPages->tableByPage)) {
    if(n == 0 || range_ends[b] != e->p) {
      b++;
      range_starts[b] = e->p;
@@ -319,9 +319,9 @@ void stasis_dirty_page_table_flush_range(stasis_dirty_page_table_t * dirtyPages,
   pageid_t * staleDirtyPages = 0;
   pageid_t n = 0;
   dpt_entry dummy = { start, 0 };
-  for(const dpt_entry * e = rblookup(RB_LUGTEQ, &dummy, dirtyPages->tableByPage);
+  for(const dpt_entry * e = (const dpt_entry *)rblookup(RB_LUGTEQ, &dummy, dirtyPages->tableByPage);
      e && (stop == 0 || e->p < stop);
-     e = rblookup(RB_LUGREAT, e, dirtyPages->tableByPage)) {
+     e = (const dpt_entry *)rblookup(RB_LUGREAT, e, dirtyPages->tableByPage)) {
    n++;
    staleDirtyPages = stasis_realloc(staleDirtyPages, n, pageid_t);
    staleDirtyPages[n-1] = e->p;
@@ -360,9 +360,9 @@ stasis_dirty_page_table_t * stasis_dirty_page_table_init(void) {
 void stasis_dirty_page_table_deinit(stasis_dirty_page_table_t * dirtyPages) {
   int areDirty = 0;
   dpt_entry dummy = {0, 0};
-  for(const dpt_entry * e = rblookup(RB_LUGTEQ, &dummy, dirtyPages->tableByPage);
+  for(const dpt_entry * e = (const dpt_entry *)rblookup(RB_LUGTEQ, &dummy, dirtyPages->tableByPage);
      e;
-     e = rblookup(RB_LUGREAT, &dummy, dirtyPages->tableByPage)) {
+     e = (const dpt_entry *)rblookup(RB_LUGREAT, &dummy, dirtyPages->tableByPage)) {
 
    if((!areDirty) &&
       (!stasis_suppress_unclean_shutdown_warnings)) {
@@ -376,9 +376,9 @@ void stasis_dirty_page_table_deinit(stasis_dirty_page_table_t * dirtyPages) {
  }
 
  dpt_entry dummy2 = {0, 0};
-  for(const dpt_entry * e = rblookup(RB_LUGTEQ, &dummy2, dirtyPages->tableByLsnAndPage);
+  for(const dpt_entry * e = (const dpt_entry *)rblookup(RB_LUGTEQ, &dummy2, dirtyPages->tableByLsnAndPage);
      e;
-     e = rblookup(RB_LUGREAT, &dummy2, dirtyPages->tableByLsnAndPage)) {
+     e = (const dpt_entry *)rblookup(RB_LUGREAT, &dummy2, dirtyPages->tableByLsnAndPage)) {
    dummy2 = *e;
    rbdelete(e, dirtyPages->tableByLsnAndPage);
    free((void*)e);
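The stasis_realloc(ptr, count, type) call above, and the stasis_malloc/stasis_alloc/stasis_calloc calls elsewhere in the commit, presumably centralize the same cast plus the sizeof arithmetic so each call site stays legal C++. Plausible definitions, assumed rather than copied from stasis/common.h:

    #include <stdlib.h>

    /* Each macro wraps the libc allocator, multiplies in sizeof(type),
     * and casts the result so the expression compiles under g++. */
    #define stasis_alloc(type)               ((type *)malloc(sizeof(type)))
    #define stasis_malloc(count, type)       ((type *)malloc((count) * sizeof(type)))
    #define stasis_calloc(count, type)       ((type *)calloc((count), sizeof(type)))
    #define stasis_realloc(ptr, count, type) ((type *)realloc((ptr), (count) * sizeof(type)))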
@@ -136,22 +136,25 @@ static int debug_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
   printf("tid=%9ld retn force(%lx) = %d\n", (long)(intptr_t)pthread_self(), (unsigned long)hh, ret); fflush(stdout);
   return ret;
 }
 
 struct stasis_handle_t debug_func = {
-  .num_copies = debug_num_copies,
-  .num_copies_buffer = debug_num_copies_buffer,
-  .close = debug_close,
-  .dup = debug_dup,
-  .enable_sequential_optimizations = debug_enable_sequential_optimizations,
-  .end_position = debug_end_position,
-  .write = debug_write,
-  .write_buffer = debug_write_buffer,
-  .release_write_buffer = debug_release_write_buffer,
-  .read = debug_read,
-  .read_buffer = debug_read_buffer,
-  .release_read_buffer = debug_release_read_buffer,
-  .force = debug_force,
-  .force_range = debug_force_range,
-  .error = 0
+  /*.num_copies =*/ debug_num_copies,
+  /*.num_copies_buffer =*/ debug_num_copies_buffer,
+  /*.close =*/ debug_close,
+  /*.dup =*/ debug_dup,
+  /*.enable_sequential_optimizations =*/ debug_enable_sequential_optimizations,
+  /*.end_position =*/ debug_end_position,
+  /*.write_buffer =*/ debug_write_buffer,
+  /*.release_write_buffer =*/ debug_release_write_buffer,
+  /*.read_buffer =*/ debug_read_buffer,
+  /*.release_read_buffer =*/ debug_release_read_buffer,
+  /*.write =*/ debug_write,
+  /*.read =*/ debug_read,
+  /*.force =*/ debug_force,
+  /*.async_force =*/ NULL,
+  /*.force_range =*/ debug_force_range,
+  /*.fallocate =*/ NULL,
+  /*.error =*/ 0
 };
 
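The initializer rewrite above is the third pattern: designated initializers (.field = value) are C99 and were not admitted into C++ until C++20, so the handle tables switch to positional form with the field names preserved as comments. Positional form must list members in declaration order with no gaps before the last member used, which is why write/read move to their declared positions and why async_force and fallocate gain explicit NULL entries. A toy version of the same transformation; struct ops and the do_* functions are illustrative:

    #include <stddef.h>

    struct ops { int (*read_fn)(void); int (*write_fn)(void); int (*flush_fn)(void); };

    static int do_read(void)  { return 0; }
    static int do_write(void) { return 0; }

    /* C99/gcc-only (designated: any order, gaps allowed):
     *   struct ops o = { .write_fn = do_write, .read_fn = do_read };
     * C and C++ (positional: declaration order, names kept as comments): */
    static struct ops o = {
      /*.read_fn  =*/ do_read,
      /*.write_fn =*/ do_write,
      /*.flush_fn =*/ NULL,
    };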
@@ -18,7 +18,7 @@ typedef struct file_impl {
 } file_impl;
 
 static int updateEOF(stasis_handle_t * h) {
-  file_impl * impl = h->impl;
+  file_impl * impl = (file_impl *)h->impl;
   off_t pos = lseek(impl->fd, 0, SEEK_END);
   if(pos == (off_t)-1) {
     return errno;
@@ -46,7 +46,7 @@ static int file_close(stasis_handle_t * h) {
   else return errno;
 }
 static stasis_handle_t* file_dup(stasis_handle_t * h) {
-  file_impl * impl = h->impl;
+  file_impl * impl = (file_impl *)h->impl;
   return stasis_handle_open_file(impl->filename, impl->file_flags, impl->file_mode);
 }
 static void file_enable_sequential_optimizations(stasis_handle_t * h) {
@@ -348,7 +348,7 @@ static int file_release_read_buffer(stasis_read_buffer_t * r) {
   return 0;
 }
 static int file_force(stasis_handle_t * h) {
-  file_impl * impl = h->impl;
+  file_impl * impl = (file_impl *)h->impl;
 
   if(!(impl->file_flags & O_SYNC)) {
     int fd = impl->fd;
@@ -365,7 +365,7 @@ static int file_force(stasis_handle_t * h) {
   return 0;
 }
 static int file_async_force(stasis_handle_t *h) {
-  file_impl * impl = h->impl;
+  file_impl * impl = (file_impl *)h->impl;
   int ret = 0;
   if(!(impl->file_flags & O_SYNC)) {
     // not opened synchronously; we need to explicitly sync.
@@ -407,7 +407,7 @@ static int file_async_force(stasis_handle_t *h) {
   return ret;
 }
 static int file_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
-  file_impl * impl = h->impl;
+  file_impl * impl = (file_impl *)h->impl;
   int ret = 0;
   if(!(impl->file_flags & O_SYNC)) {
     // not opened synchronously; we need to explicitly sync.
@@ -453,7 +453,7 @@ static int file_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
   return ret;
 }
 static int file_fallocate(struct stasis_handle_t* h, lsn_t off, lsn_t len) {
-  file_impl * impl = h->impl;
+  file_impl * impl = (file_impl *)h->impl;
 #ifdef HAVE_POSIX_FALLOCATE
   return posix_fallocate(impl->fd, off, len);
 #else
@@ -464,23 +464,23 @@ static int file_fallocate(struct stasis_handle_t* h, lsn_t off, lsn_t len) {
 }
 
 struct stasis_handle_t file_func = {
-  .num_copies = file_num_copies,
-  .num_copies_buffer = file_num_copies_buffer,
-  .close = file_close,
-  .dup = file_dup,
-  .enable_sequential_optimizations = file_enable_sequential_optimizations,
-  .end_position = file_end_position,
-  .write = file_write,
-  .write_buffer = file_write_buffer,
-  .release_write_buffer = file_release_write_buffer,
-  .read = file_read,
-  .read_buffer = file_read_buffer,
-  .release_read_buffer = file_release_read_buffer,
-  .force = file_force,
-  .async_force = file_async_force,
-  .force_range = file_force_range,
-  .fallocate = file_fallocate,
-  .error = 0
+  /*.num_copies =*/ file_num_copies,
+  /*.num_copies_buffer =*/ file_num_copies_buffer,
+  /*.close =*/ file_close,
+  /*.dup =*/ file_dup,
+  /*.enable_sequential_optimizations =*/ file_enable_sequential_optimizations,
+  /*.end_position =*/ file_end_position,
+  /*.write_buffer =*/ file_write_buffer,
+  /*.release_write_buffer =*/ file_release_write_buffer,
+  /*.read_buffer =*/ file_read_buffer,
+  /*.release_read_buffer =*/ file_release_read_buffer,
+  /*.write =*/ file_write,
+  /*.read =*/ file_read,
+  /*.force =*/ file_force,
+  /*.async_force =*/ file_async_force,
+  /*.force_range =*/ file_force_range,
+  /*.fallocate =*/ file_fallocate,
+  /*.error =*/ 0
 };
 
 stasis_handle_t * stasis_handle(open_file)(const char * filename, int flags, int mode) {
@@ -12,7 +12,7 @@ static int mem_num_copies(stasis_handle_t * h) { return 1; }
 static int mem_num_copies_buffer(stasis_handle_t * h) { return 0; }
 
 static int mem_close(stasis_handle_t * h) {
-  mem_impl *impl = h->impl;
+  mem_impl *impl = (mem_impl *)h->impl;
   (impl->refcount)--;
   if(impl->refcount) { return 0; }
 
@@ -23,7 +23,7 @@ static int mem_close(stasis_handle_t * h) {
   return 0;
 }
 static stasis_handle_t * mem_dup(stasis_handle_t *h) {
-  mem_impl *impl = h->impl;
+  mem_impl *impl = (mem_impl *)h->impl;
   (impl->refcount)++;
   return h;
 }
@@ -165,21 +165,23 @@ static int mem_force_range(stasis_handle_t *h,lsn_t start, lsn_t stop) {
 }
 
 struct stasis_handle_t mem_func = {
-  .num_copies = mem_num_copies,
-  .num_copies_buffer = mem_num_copies_buffer,
-  .close = mem_close,
-  .dup = mem_dup,
-  .enable_sequential_optimizations = mem_enable_sequential_optimizations,
-  .end_position = mem_end_position,
-  .write = mem_write,
-  .write_buffer = mem_write_buffer,
-  .release_write_buffer = mem_release_write_buffer,
-  .read = mem_read,
-  .read_buffer = mem_read_buffer,
-  .release_read_buffer = mem_release_read_buffer,
-  .force = mem_force,
-  .force_range = mem_force_range,
-  .error = 0
+  /*.num_copies =*/ mem_num_copies,
+  /*.num_copies_buffer =*/ mem_num_copies_buffer,
+  /*.close =*/ mem_close,
+  /*.dup =*/ mem_dup,
+  /*.enable_sequential_optimizations =*/ mem_enable_sequential_optimizations,
+  /*.end_position =*/ mem_end_position,
+  /*.write_buffer =*/ mem_write_buffer,
+  /*.release_write_buffer =*/ mem_release_write_buffer,
+  /*.read_buffer =*/ mem_read_buffer,
+  /*.release_read_buffer =*/ mem_release_read_buffer,
+  /*.write =*/ mem_write,
+  /*.read =*/ mem_read,
+  /*.force =*/ mem_force,
+  /*.async_force =*/ mem_force,
+  /*.force_range =*/ mem_force_range,
+  /*.fallocate =*/ NULL,
+  /*.error =*/ 0
 };
 
 stasis_handle_t * stasis_handle(open_memory)(void) {
@@ -231,7 +231,7 @@ hack:
 
   DEBUG("allocFastHandle(%lld)\n", off/PAGE_SIZE);
 
-  const tree_node * n = rblookup(RB_LULTEQ, np, impl->fast_handles);
+  const tree_node * n = (const tree_node *)rblookup(RB_LULTEQ, np, impl->fast_handles);
   // this code only works when writes / reads are aligned to immutable
   // boundaries, and never cross boundaries.
   if((!n) ||
@@ -287,7 +287,7 @@ static inline const tree_node * findFastHandle(nbw_impl * impl, lsn_t off,
   tree_node * np = allocTreeNode(off, len);
 
   pthread_mutex_lock(&impl->mut);
-  const tree_node * n = rbfind(np, impl->fast_handles);
+  const tree_node * n = (const tree_node *)rbfind(np, impl->fast_handles);
   if(n) ((tree_node*)n)->pin_count++;
   pthread_mutex_unlock(&impl->mut);
 
@@ -335,7 +335,7 @@ static int nbw_num_copies_buffer(stasis_handle_t * h) {
   return 0;
 }
 static int nbw_close(stasis_handle_t * h) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
 
   pthread_mutex_lock(&impl->mut);
 
@@ -387,7 +387,7 @@ static int nbw_close(stasis_handle_t * h) {
   return ret;
 }
 static stasis_handle_t * nbw_dup(stasis_handle_t *h) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   (impl->refcount)++;
   return h;
 }
@@ -395,7 +395,7 @@ static void nbw_enable_sequential_optimizations(stasis_handle_t *h) {
   // TODO non blocking should pass sequential optimizations down to underlying handles.
 }
 static lsn_t nbw_end_position(stasis_handle_t *h) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   pthread_mutex_lock(&impl->mut);
   lsn_t ret = impl->end_pos;
   pthread_mutex_unlock(&impl->mut);
@@ -403,7 +403,7 @@ static lsn_t nbw_end_position(stasis_handle_t *h) {
 }
 static stasis_write_buffer_t * nbw_write_buffer(stasis_handle_t * h,
                                                 lsn_t off, lsn_t len) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   const tree_node * n = allocFastHandle(impl, off, len);
   stasis_write_buffer_t * w = n->h->write_buffer(n->h, off, len);
 
@@ -436,8 +436,8 @@ static stasis_write_buffer_t * nbw_write_buffer(stasis_handle_t * h,
   return ret;
 }
 static int nbw_release_write_buffer(stasis_write_buffer_t * w) {
-  nbw_impl * impl = w->h->impl;
-  write_buffer_impl * w_impl = w->impl;
+  nbw_impl * impl = (nbw_impl *)w->h->impl;
+  write_buffer_impl * w_impl = (write_buffer_impl *)w->impl;
   const tree_node * n = w_impl->n;
   w_impl->w->h->release_write_buffer(w_impl->w);
   releaseFastHandle(impl, n, DIRTY);
@@ -447,7 +447,7 @@ static int nbw_release_write_buffer(stasis_write_buffer_t * w) {
 }
 static stasis_read_buffer_t * nbw_read_buffer(stasis_handle_t * h,
                                               lsn_t off, lsn_t len) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   const tree_node * n = findFastHandle(impl, off, len);
   stasis_read_buffer_t * r;
   stasis_handle_t * r_h = n ? n->h : getSlowHandle(impl);
@@ -468,8 +468,8 @@ static stasis_read_buffer_t * nbw_read_buffer(stasis_handle_t * h,
   return ret;
 }
 static int nbw_release_read_buffer(stasis_read_buffer_t * r) {
-  nbw_impl * impl = r->h->impl;
-  read_buffer_impl * r_impl = r->impl;
+  nbw_impl * impl = (nbw_impl *)r->h->impl;
+  read_buffer_impl * r_impl = (read_buffer_impl *)r->impl;
   const tree_node * n = r_impl->n;
   stasis_handle_t * oldHandle = r_impl->r->h;
   r_impl->r->h->release_read_buffer(r_impl->r);
@@ -487,7 +487,7 @@ static int nbw_release_read_buffer(stasis_read_buffer_t * r) {
 }
 static int nbw_write(stasis_handle_t * h, lsn_t off,
                      const byte * dat, lsn_t len) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   const tree_node * n = allocFastHandle(impl, off, len);
   int ret = n->h->write(n->h, off, dat, len);
   releaseFastHandle(impl, n, DIRTY);
@@ -506,7 +506,7 @@ static int nbw_write(stasis_handle_t * h, lsn_t off,
 }
 static int nbw_read(stasis_handle_t * h,
                     lsn_t off, byte * buf, lsn_t len) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   const tree_node * n = findFastHandle(impl, off, len);
   int ret;
   // XXX should be handled by releaseFastHandle.
@@ -521,13 +521,13 @@ static int nbw_read(stasis_handle_t * h,
   return ret;
 }
 static int nbw_force_range_impl(stasis_handle_t * h, lsn_t start, lsn_t stop) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   //  pthread_mutex_lock(&impl->mut);
   tree_node scratch;
   scratch.start_pos = start;
   scratch.end_pos = start+1;
   if(!stop) stop = impl->end_pos;
-  const tree_node * n = rblookup(RB_LUGTEQ,&scratch,impl->fast_handles); // min)(impl->fast_handles);
+  const tree_node * n = (const tree_node *)rblookup(RB_LUGTEQ,&scratch,impl->fast_handles); // min)(impl->fast_handles);
   int blocked = 0;
   while(n) {
     if(n->start_pos >= stop) { break; }
@@ -536,18 +536,18 @@ static int nbw_force_range_impl(stasis_handle_t * h, lsn_t start, lsn_t stop) {
       ((tree_node*)n)->dirty = NEEDS_FORCE;
       blocked = 1;
     }
-    n = rblookup(RB_LUNEXT,n,impl->fast_handles);
+    n = (const tree_node *)rblookup(RB_LUNEXT,n,impl->fast_handles);
   }
   pthread_cond_broadcast(&impl->pending_writes_cond);
   while(blocked) {
     pthread_cond_wait(&impl->force_completed_cond,&impl->mut);
     blocked = 0;
-    n = rbmin(impl->fast_handles);
+    n = (const tree_node *)rbmin(impl->fast_handles);
     while(n) {
       if(n->dirty == NEEDS_FORCE) {
         blocked = 1;
       }
-      n = rblookup(RB_LUNEXT,n,impl->fast_handles);
+      n = (const tree_node *)rblookup(RB_LUNEXT,n,impl->fast_handles);
     }
   }
   int ret = 0;
@@ -567,7 +567,7 @@ static int nbw_force_range_impl(stasis_handle_t * h, lsn_t start, lsn_t stop) {
   return ret;
 }
 static int nbw_force(stasis_handle_t * h) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   pthread_mutex_lock(&impl->mut);
   int ret = nbw_force_range_impl(h, 0, impl->end_pos);
   pthread_mutex_unlock(&impl->mut);
@@ -576,7 +576,7 @@ static int nbw_force(stasis_handle_t * h) {
 static int nbw_force_range(stasis_handle_t * h,
                            lsn_t start,
                            lsn_t stop) {
-  nbw_impl * impl = h->impl;
+  nbw_impl * impl = (nbw_impl *)h->impl;
   pthread_mutex_lock(&impl->mut);
   int ret = nbw_force_range_impl(h, start, stop);
   pthread_mutex_unlock(&impl->mut);
@@ -584,21 +584,23 @@ static int nbw_force_range(stasis_handle_t * h,
 }
 
 struct stasis_handle_t nbw_func = {
-  .num_copies = nbw_num_copies,
-  .num_copies_buffer = nbw_num_copies_buffer,
-  .close = nbw_close,
-  .dup = nbw_dup,
-  .enable_sequential_optimizations = nbw_enable_sequential_optimizations,
-  .end_position = nbw_end_position,
-  .write = nbw_write,
-  .write_buffer = nbw_write_buffer,
-  .release_write_buffer = nbw_release_write_buffer,
-  .read = nbw_read,
-  .read_buffer = nbw_read_buffer,
-  .release_read_buffer = nbw_release_read_buffer,
-  .force = nbw_force,
-  .force_range = nbw_force_range,
-  .error = 0
+  /*.num_copies =*/ nbw_num_copies,
+  /*.num_copies_buffer =*/ nbw_num_copies_buffer,
+  /*.close =*/ nbw_close,
+  /*.dup =*/ nbw_dup,
+  /*.enable_sequential_optimizations =*/ nbw_enable_sequential_optimizations,
+  /*.end_position =*/ nbw_end_position,
+  /*.write_buffer =*/ nbw_write_buffer,
+  /*.release_write_buffer =*/ nbw_release_write_buffer,
+  /*.read_buffer =*/ nbw_read_buffer,
+  /*.release_read_buffer =*/ nbw_release_read_buffer,
+  /*.write =*/ nbw_write,
+  /*.read =*/ nbw_read,
+  /*.force =*/ nbw_force,
+  /*.async_force =*/ NULL,
+  /*.force_range =*/ nbw_force_range,
+  /*.fallocate =*/ NULL,
+  /*.error =*/ 0
 };
 
 /**
@@ -615,8 +617,8 @@ struct stasis_handle_t nbw_func = {
 
 */
 static void * nbw_worker(void * handle) {
-  stasis_handle_t * h = handle;
-  nbw_impl * impl = h->impl;
+  stasis_handle_t * h = (stasis_handle_t *)handle;
+  nbw_impl * impl = (nbw_impl *)h->impl;
 
   stasis_handle_t * slow = getSlowHandle(impl);
 
@@ -80,11 +80,11 @@ static int pfile_close(stasis_handle_t *h) {
 }
 
 static stasis_handle_t * pfile_dup(stasis_handle_t *h) {
-  pfile_impl *impl = h->impl;
+  pfile_impl *impl = (pfile_impl *)h->impl;
   return stasis_handle_open_pfile(impl->filename, impl->file_flags, impl->file_mode);
 }
 static void pfile_enable_sequential_optimizations(stasis_handle_t *h) {
-  pfile_impl *impl = h->impl;
+  pfile_impl *impl = (pfile_impl *)h->impl;
   impl->sequential = 1;
 #ifdef HAVE_POSIX_FADVISE
   int err = posix_fadvise(impl->fd, 0, 0, POSIX_FADV_SEQUENTIAL);
@@ -289,7 +289,7 @@ static int pfile_release_read_buffer(stasis_read_buffer_t *r) {
 }
 static int pfile_force(stasis_handle_t *h) {
   TICK(force_hist);
-  pfile_impl *impl = h->impl;
+  pfile_impl *impl = (pfile_impl *)h->impl;
   if(!(impl->file_flags & O_SYNC)) {
 #ifdef HAVE_FDATASYNC
     DEBUG("pfile_force() is calling fdatasync()\n");
@@ -312,7 +312,7 @@ static int pfile_force(stasis_handle_t *h) {
 }
 static int pfile_async_force(stasis_handle_t *h) {
   TICK(force_range_hist);
-  pfile_impl * impl = h->impl;
+  pfile_impl * impl = (pfile_impl *)h->impl;
 #ifdef HAVE_SYNC_FILE_RANGE
   // stop of zero syncs to eof.
   DEBUG("pfile_force_range calling sync_file_range %lld %lld\n",
@@ -349,7 +349,7 @@ static int pfile_async_force(stasis_handle_t *h) {
 }
 static int pfile_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
   TICK(force_range_hist);
-  pfile_impl * impl = h->impl;
+  pfile_impl * impl = (pfile_impl *)h->impl;
 #ifdef HAVE_SYNC_FILE_RANGE
   // stop of zero syncs to eof.
   DEBUG("pfile_force_range calling sync_file_range %lld %lld\n",
@@ -386,7 +386,7 @@ static int pfile_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
   return ret;
 }
 static int pfile_fallocate(struct stasis_handle_t* h, lsn_t off, lsn_t len) {
-  pfile_impl * impl = h->impl;
+  pfile_impl * impl = (pfile_impl *)h->impl;
 #ifdef HAVE_POSIX_FALLOCATE
   return posix_fallocate(impl->fd, off, len);
 #else
@@ -396,23 +396,23 @@ static int pfile_fallocate(struct stasis_handle_t* h, lsn_t off, lsn_t len) {
 #endif
 }
 static struct stasis_handle_t pfile_func = {
-  .num_copies = pfile_num_copies,
-  .num_copies_buffer = pfile_num_copies_buffer,
-  .close = pfile_close,
-  .dup = pfile_dup,
-  .enable_sequential_optimizations = pfile_enable_sequential_optimizations,
-  .end_position = pfile_end_position,
-  .write = pfile_write,
-  .write_buffer = pfile_write_buffer,
-  .release_write_buffer = pfile_release_write_buffer,
-  .read = pfile_read,
-  .read_buffer = pfile_read_buffer,
-  .release_read_buffer = pfile_release_read_buffer,
-  .force = pfile_force,
-  .async_force = pfile_async_force,
-  .force_range = pfile_force_range,
-  .fallocate = pfile_fallocate,
-  .error = 0
+  /*.num_copies =*/ pfile_num_copies,
+  /*.num_copies_buffer =*/ pfile_num_copies_buffer,
+  /*.close =*/ pfile_close,
+  /*.dup =*/ pfile_dup,
+  /*.enable_sequential_optimizations =*/ pfile_enable_sequential_optimizations,
+  /*.end_position =*/ pfile_end_position,
+  /*.write_buffer =*/ pfile_write_buffer,
+  /*.release_write_buffer =*/ pfile_release_write_buffer,
+  /*.read_buffer =*/ pfile_read_buffer,
+  /*.release_read_buffer =*/ pfile_release_read_buffer,
+  /*.write =*/ pfile_write,
+  /*.read =*/ pfile_read,
+  /*.force =*/ pfile_force,
+  /*.async_force =*/ pfile_async_force,
+  /*.force_range =*/ pfile_force_range,
+  /*.fallocate =*/ pfile_fallocate,
+  /*.error =*/ 0
 };
 
 stasis_handle_t *stasis_handle(open_pfile)(const char *filename,
@@ -36,15 +36,15 @@ typedef struct raid0_impl {
 } raid0_impl;
 
 static int raid0_num_copies(stasis_handle_t *h) {
-  raid0_impl * i = h->impl;
+  raid0_impl * i = (raid0_impl *)h->impl;
   return i->h[0]->num_copies(i->h[0]);
 }
 static int raid0_num_copies_buffer(stasis_handle_t *h) {
-  raid0_impl * i = h->impl;
+  raid0_impl * i = (raid0_impl *)h->impl;
   return i->h[0]->num_copies_buffer(i->h[0]);
 }
 static int raid0_close(stasis_handle_t *h) {
-  raid0_impl * r = h->impl;
+  raid0_impl * r = (raid0_impl *)h->impl;
   int ret = 0;
   for(int i = 0; i < r->handle_count; i++) {
     int this_ret = r->h[i]->close(r->h[i]);
@@ -56,7 +56,7 @@ static int raid0_close(stasis_handle_t *h) {
   return ret;
 }
 static stasis_handle_t* raid0_dup(stasis_handle_t *h) {
-  raid0_impl * r = h->impl;
+  raid0_impl * r = (raid0_impl *)h->impl;
   stasis_handle_t ** h_dup = stasis_malloc(r->handle_count, stasis_handle_t*);
   for(int i = 0; i < r->handle_count; i++) {
     h_dup[i] = r->h[i]->dup(r->h[i]);
@@ -66,13 +66,13 @@ static stasis_handle_t* raid0_dup(stasis_handle_t *h) {
   return ret;
 }
 static void raid0_enable_sequential_optimizations(stasis_handle_t *h) {
-  raid0_impl * r = h->impl;
+  raid0_impl * r = (raid0_impl *)h->impl;
   for(int i = 0; i < r->handle_count; i++) {
     r->h[i]->enable_sequential_optimizations(r->h[i]);
   }
 }
 static lsn_t raid0_end_position(stasis_handle_t *h) {
-  raid0_impl *r = h->impl;
+  raid0_impl *r = (raid0_impl *)h->impl;
   lsn_t max_end = 0;
   for(int i = 0; i < r->handle_count; i++) {
     lsn_t this_end = r->h[i]->end_position(r->h[i]) + (i * r->stripe_size);
@@ -102,19 +102,19 @@ static lsn_t raid0_calc_off(raid0_impl * r, lsn_t off, lsn_t len) {
   return block * r->stripe_size + (off % r->stripe_size);
 }
 static int raid0_read(stasis_handle_t *h, lsn_t off, byte *buf, lsn_t len) {
-  raid0_impl *r = h->impl;
+  raid0_impl *r = (raid0_impl *)h->impl;
   int stripe = raid0_calc_stripe(r, off, len);
   lsn_t stripe_off = raid0_calc_off(r, off, len);
   return r->h[stripe]->read(r->h[stripe], stripe_off, buf, len);
 }
 static int raid0_write(stasis_handle_t *h, lsn_t off, const byte *dat, lsn_t len) {
-  raid0_impl *r = h->impl;
+  raid0_impl *r = (raid0_impl *)h->impl;
   int stripe = raid0_calc_stripe(r, off, len);
   lsn_t stripe_off = raid0_calc_off(r, off, len);
   return r->h[stripe]->write(r->h[stripe], stripe_off, dat, len);
 }
 static stasis_write_buffer_t * raid0_write_buffer(stasis_handle_t *h, lsn_t off, lsn_t len) {
-  raid0_impl *r = h->impl;
+  raid0_impl *r = (raid0_impl *)h->impl;
   int stripe = raid0_calc_stripe(r, off, len);
   lsn_t stripe_off = raid0_calc_off(r, off, len);
   return r->h[stripe]->write_buffer(r->h[stripe], stripe_off, len);
@@ -124,7 +124,7 @@ static int raid0_release_write_buffer(stasis_write_buffer_t *w) {
 }
 static stasis_read_buffer_t *raid0_read_buffer(stasis_handle_t *h,
                                                lsn_t off, lsn_t len) {
-  raid0_impl *r = h->impl;
+  raid0_impl *r = (raid0_impl *)h->impl;
   int stripe = raid0_calc_stripe(r, off, len);
   lsn_t stripe_off = raid0_calc_off(r, off, len);
   return r->h[stripe]->read_buffer(r->h[stripe], stripe_off, len);
@@ -133,7 +133,7 @@ static int raid0_release_read_buffer(stasis_read_buffer_t *r) {
   return r->h->release_read_buffer(r);
 }
 static int raid0_force(stasis_handle_t *h) {
-  raid0_impl * r = h->impl;
+  raid0_impl * r = (raid0_impl *)h->impl;
   int ret = 0;
   for(int i = 0; i < r->handle_count; i++) {
     int this_ret = r->h[i]->force(r->h[i]);
@@ -148,7 +148,7 @@ static int raid0_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
   return raid0_force(h);
 }
 static int raid0_async_force(stasis_handle_t *h) {
-  raid0_impl * r = h->impl;
+  raid0_impl * r = (raid0_impl *)h->impl;
   int ret = 0;
   for(int i = 0; i < r->handle_count; i++) {
     int this_ret = r->h[i]->async_force(r->h[i]);
@@ -157,7 +157,7 @@ static int raid0_async_force(stasis_handle_t *h) {
   return ret;
 }
 static int raid0_fallocate(stasis_handle_t *h, lsn_t off, lsn_t len) {
-  raid0_impl * r = h->impl;
+  raid0_impl * r = (raid0_impl *)h->impl;
   int ret = 0;
   lsn_t start_block = raid0_calc_block(r, off, 0);
   lsn_t start_off = (start_block) * r->stripe_size;
@@ -171,23 +171,23 @@ static int raid0_fallocate(stasis_handle_t *h, lsn_t off, lsn_t len) {
   return ret;
 }
 struct stasis_handle_t raid0_func = {
-  .num_copies = raid0_num_copies,
-  .num_copies_buffer = raid0_num_copies_buffer,
-  .close = raid0_close,
-  .dup = raid0_dup,
-  .enable_sequential_optimizations = raid0_enable_sequential_optimizations,
-  .end_position = raid0_end_position,
-  .write = raid0_write,
-  .write_buffer = raid0_write_buffer,
-  .release_write_buffer = raid0_release_write_buffer,
-  .read = raid0_read,
-  .read_buffer = raid0_read_buffer,
-  .release_read_buffer = raid0_release_read_buffer,
-  .force = raid0_force,
-  .async_force = raid0_async_force,
-  .force_range = raid0_force_range,
-  .fallocate = raid0_fallocate,
-  .error = 0
+  /*.num_copies =*/ raid0_num_copies,
+  /*.num_copies_buffer =*/ raid0_num_copies_buffer,
+  /*.close =*/ raid0_close,
+  /*.dup =*/ raid0_dup,
+  /*.enable_sequential_optimizations =*/ raid0_enable_sequential_optimizations,
+  /*.end_position =*/ raid0_end_position,
+  /*.write_buffer =*/ raid0_write_buffer,
+  /*.release_write_buffer =*/ raid0_release_write_buffer,
+  /*.read_buffer =*/ raid0_read_buffer,
+  /*.release_read_buffer =*/ raid0_release_read_buffer,
+  /*.write =*/ raid0_write,
+  /*.read =*/ raid0_read,
+  /*.force =*/ raid0_force,
+  /*.async_force =*/ raid0_async_force,
+  /*.force_range =*/ raid0_force_range,
+  /*.fallocate =*/ raid0_fallocate,
+  /*.error =*/ 0
 };
 
 stasis_handle_t * stasis_handle_open_raid0(int handle_count, stasis_handle_t** h, uint32_t stripe_size) {
@@ -29,15 +29,15 @@ typedef struct raid1_impl {
 } raid1_impl;
 
 static int raid1_num_copies(stasis_handle_t *h) {
-  raid1_impl * i = h->impl;
+  raid1_impl * i = (raid1_impl *)h->impl;
   return i->a->num_copies(i->a);
 }
 static int raid1_num_copies_buffer(stasis_handle_t *h) {
-  raid1_impl * i = h->impl;
+  raid1_impl * i = (raid1_impl *)h->impl;
   return i->a->num_copies_buffer(i->a);
 }
 static int raid1_close(stasis_handle_t *h) {
-  raid1_impl * i = h->impl;
+  raid1_impl * i = (raid1_impl *)h->impl;
   int reta = i->a->close(i->a);
   int retb = i->b->close(i->b);
   free(i);
@@ -45,20 +45,20 @@ static int raid1_close(stasis_handle_t *h) {
   return reta ? reta : retb;
 }
 static stasis_handle_t* raid1_dup(stasis_handle_t *h) {
-  raid1_impl * i = h->impl;
+  raid1_impl * i = (raid1_impl *)h->impl;
   return stasis_handle_open_raid1(i->a->dup(i->a), i->b->dup(i->b));
 }
 static void raid1_enable_sequential_optimizations(stasis_handle_t *h) {
-  raid1_impl * i = h->impl;
+  raid1_impl * i = (raid1_impl *)h->impl;
   i->a->enable_sequential_optimizations(i->a);
   i->b->enable_sequential_optimizations(i->b);
 }
 static lsn_t raid1_end_position(stasis_handle_t *h) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   return i->a->end_position(i->a);
 }
 static int raid1_read(stasis_handle_t *h, lsn_t off, byte *buf, lsn_t len) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   struct timeval tv;
   gettimeofday(&tv, 0);
   // use some low bit that's likely "real" as a source of randomness
@@ -69,19 +69,19 @@ static int raid1_read(stasis_handle_t *h, lsn_t off, byte *buf, lsn_t len) {
   }
 }
 static int raid1_write(stasis_handle_t *h, lsn_t off, const byte *dat, lsn_t len) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   int retA = i->a->write(i->a, off, dat, len);
   int retB = i->b->write(i->b, off, dat, len);
   return retA ? retA : retB;
 }
 static stasis_write_buffer_t * raid1_write_buffer(stasis_handle_t *h, lsn_t off, lsn_t len) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   stasis_write_buffer_t * ret = i->a->write_buffer(i->a, off, len);
   ret->h = h;
   return ret;
 }
 static int raid1_release_write_buffer(stasis_write_buffer_t *w) {
-  raid1_impl *i = w->h->impl;
+  raid1_impl *i = (raid1_impl *)w->h->impl;
   w->h = i->a;
   assert(w->h == i->a);
   int retA = i->b->write(i->b, w->off, w->buf, w->len);
@@ -90,7 +90,7 @@ static int raid1_release_write_buffer(stasis_write_buffer_t *w) {
 }
 static stasis_read_buffer_t *raid1_read_buffer(stasis_handle_t *h,
                                                lsn_t off, lsn_t len) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   struct timeval tv;
   gettimeofday(&tv, 0);
   // use some low bit that's likely "real" as a source of randomness
@@ -105,47 +105,47 @@ static int raid1_release_read_buffer(stasis_read_buffer_t *r) {
   abort();
 }
 static int raid1_force(stasis_handle_t *h) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   int retA = i->a->force(i->a);
   int retB = i->b->force(i->b);
   return retA ? retA : retB;
 }
 static int raid1_async_force(stasis_handle_t *h) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   int retA = i->a->async_force(i->a);
   int retB = i->b->async_force(i->b);
   return retA ? retA : retB;
 }
 static int raid1_force_range(stasis_handle_t *h, lsn_t start, lsn_t stop) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   int retA = i->a->force_range(i->a, start, stop);
   int retB = i->b->force_range(i->b, start, stop);
   return retA ? retA : retB;
 }
 static int raid1_fallocate(stasis_handle_t *h, lsn_t off, lsn_t len) {
-  raid1_impl *i = h->impl;
+  raid1_impl *i = (raid1_impl *)h->impl;
   int retA = i->a->fallocate(i->a, off, len);
   int retB = i->b->fallocate(i->b, off, len);
   return retA ? retA : retB;
 }
 struct stasis_handle_t raid1_func = {
-  .num_copies = raid1_num_copies,
-  .num_copies_buffer = raid1_num_copies_buffer,
-  .close = raid1_close,
-  .dup = raid1_dup,
-  .enable_sequential_optimizations = raid1_enable_sequential_optimizations,
-  .end_position = raid1_end_position,
-  .write = raid1_write,
-  .write_buffer = raid1_write_buffer,
-  .release_write_buffer = raid1_release_write_buffer,
-  .read = raid1_read,
-  .read_buffer = raid1_read_buffer,
-  .release_read_buffer = raid1_release_read_buffer,
-  .force = raid1_force,
-  .async_force = raid1_async_force,
-  .force_range = raid1_force_range,
-  .fallocate = raid1_fallocate,
-  .error = 0
+  /*.num_copies =*/ raid1_num_copies,
+  /*.num_copies_buffer =*/ raid1_num_copies_buffer,
+  /*.close =*/ raid1_close,
+  /*.dup =*/ raid1_dup,
+  /*.enable_sequential_optimizations =*/ raid1_enable_sequential_optimizations,
+  /*.end_position =*/ raid1_end_position,
+  /*.write_buffer =*/ raid1_write_buffer,
+  /*.release_write_buffer =*/ raid1_release_write_buffer,
+  /*.read_buffer =*/ raid1_read_buffer,
+  /*.release_read_buffer =*/ raid1_release_read_buffer,
+  /*.write =*/ raid1_write,
+  /*.read =*/ raid1_read,
+  /*.force =*/ raid1_force,
+  /*.async_force =*/ raid1_async_force,
+  /*.force_range =*/ raid1_force_range,
+  /*.fallocate =*/ raid1_fallocate,
+  /*.error =*/ 0
 };
 
 stasis_handle_t * stasis_handle_open_raid1(stasis_handle_t* a, stasis_handle_t* b) {
@@ -16,8 +16,8 @@ struct rangeTracker {
 };
 
 static int cmp_transition(const void * a, const void * b, const void * arg) {
-  const transition * ta = a;
-  const transition * tb = b;
+  const transition * ta = (const transition *)a;
+  const transition * tb = (const transition *)b;
 
   return ta->pos - tb->pos;
 
@@ -33,7 +33,7 @@ rangeTracker * rangeTrackerInit(int quantization) {
 void rangeTrackerDeinit(rangeTracker * rt) {
   RBLIST * l = RB_ENTRY(openlist)(rt->ranges);
   const transition * t;
-  while((t = RB_ENTRY(readlist)(l))) {
+  while((t = (const transition *)RB_ENTRY(readlist)(l))) {
     RB_ENTRY(delete)(t, rt->ranges);
     fprintf(stderr, "WARNING: Detected leaked range in rangeTracker!\n");
     // Discard const to free t
@@ -139,7 +139,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
   int in_range = 0;
 
   RBLIST * list = RB_ENTRY(openlist) (rt->ranges);
-  while((t = RB_ENTRY(readlist)(list))) {
+  while((t = (const transition*)RB_ENTRY(readlist)(list))) {
     if(!(t->pins + t->delta)) {
       // end of a range.
       in_range = 0;
@@ -158,7 +158,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
   int next_range = 0;
   in_range = 0;
   list = RB_ENTRY(openlist) (rt->ranges);
-  t = RB_ENTRY(readlist)(list);
+  t = (const transition*)RB_ENTRY(readlist)(list);
   if(!t) {
     assert(range_count == 0);
     RB_ENTRY(closelist)(list);
@@ -171,7 +171,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
     ret[next_range]->start = t->pos;
     in_range = 1;
   }
-  while((t = RB_ENTRY(readlist)(list))) {
+  while((t = (const transition*)RB_ENTRY(readlist)(list))) {
     if(t->pins + t->delta) {
       if(!in_range) {
         assert(! ret[next_range]);
@@ -209,7 +209,7 @@ static void pinnedRanges(const rangeTracker * rt, const range * request, rangeTr
   expanded_range.start = rangeTrackerRoundDown(request->start, rt->quantization);
   expanded_range.stop = rangeTrackerRoundUp(request->stop, rt->quantization);
 
-  while((t = rblookup(RB_LUGREAT, t, rt->ranges))) {
+  while((t = (const transition*)rblookup(RB_LUGREAT, t, rt->ranges))) {
    assert(t->delta);
    if(t->pos >= expanded_range.stop) {
      if(in_range) {
@@ -345,7 +345,7 @@ const transition ** rangeTrackerEnumerate(rangeTracker * rt) {
  int transitionCount = 0;
  const transition * t;
  RBLIST * list = RB_ENTRY(openlist) (rt->ranges);
-  while((t = RB_ENTRY(readlist)(list))) {
+  while((t = (const transition*)RB_ENTRY(readlist)(list))) {
    transitionCount++;
  }
  RB_ENTRY(closelist)(list);
@@ -355,7 +355,7 @@ const transition ** rangeTrackerEnumerate(rangeTracker * rt) {
  list = RB_ENTRY(openlist) (rt->ranges);
  int i = 0;
 
-  while((t = RB_ENTRY(readlist)(list))) {
+  while((t = (const transition*)RB_ENTRY(readlist)(list))) {
    ret[i] = t;
    i++;
  }
 
@@ -150,7 +150,7 @@ int stasis_log_file_pool_file_filter(struct dirent* file) {
 char * stasis_log_file_pool_build_filename(stasis_log_file_pool_state * fp,
                                            lsn_t start_lsn) {
   int name_len = strlen(stasis_log_chunk_name);
-  char * first = malloc(name_len + stasis_log_file_pool_lsn_chars + 1);
+  char * first = stasis_malloc(name_len + stasis_log_file_pool_lsn_chars + 1, char);
   strcpy(first, stasis_log_chunk_name);
   sprintf(first+name_len, "%020lld", start_lsn);
   DEBUG("Name is %s\n", first);
@@ -166,7 +166,7 @@ static ssize_t mypread(int fd, byte * buf, size_t sz, off_t off) {
   size_t rem = sz;
   while(rem) {
     DEBUG("pread(%d, %lld, %lld, %lld)\n", fd, (long long)(intptr_t)buf, (long long)rem, (long long)off);
-    size_t ret = pread(fd, buf, rem, off);
+    ssize_t ret = pread(fd, buf, rem, off);
     if(ret == -1) {
       perror("Error reading from log.");
       abort();
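The size_t → ssize_t change in mypread() matches the actual signature of pread(2), which returns ssize_t; keeping the result unsigned makes the ret == -1 test rely on implicit conversion and draws sign-compare warnings. A minimal sketch of the corrected pattern (read_fully is an illustrative stand-in, not the Stasis function):

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    static ssize_t read_fully(int fd, unsigned char * buf, size_t sz, off_t off) {
      size_t rem = sz;
      while(rem) {
        ssize_t ret = pread(fd, buf, rem, off);  /* signed, matching pread(2) */
        if(ret == -1) { perror("pread"); return -1; }  /* well-typed check */
        if(ret == 0)  { break; }                 /* unexpected EOF */
        buf += ret; off += ret; rem -= (size_t)ret;
      }
      return (ssize_t)(sz - rem);
    }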
@@ -193,7 +193,7 @@ lsn_t stasis_log_file_pool_sizeof_internal_entry(stasis_log_t * log, const LogEn
 }
 // No latching requried.
 char * build_path(const char * dir, const char * file) {
-  char * full_name = malloc(strlen(file) + 1 + strlen(dir) + 1);
+  char * full_name = stasis_malloc(strlen(file) + 1 + strlen(dir) + 1, char);
   full_name[0] = 0;
   strcat(full_name, dir);
   strcat(full_name, "/");
@@ -222,7 +222,7 @@ void stasis_log_file_pool_chunk_open(stasis_log_file_pool_state * fp, int chunk)
  * @return chunk id or -1 if the offset is past the end of the live chunks.
  */
 static int get_chunk_from_offset(stasis_log_t * log, lsn_t lsn) {
-  stasis_log_file_pool_state * fp = log->impl;
+  stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
   int chunk = -1;
   if(fp->live_offsets[fp->live_count-1] <= lsn && (fp->live_offsets[fp->live_count-1] + fp->target_chunk_size) > lsn) {
     return fp->live_count - 1;
@@ -234,9 +234,9 @@ static int get_chunk_from_offset(stasis_log_t * log, lsn_t lsn) {
 }
 static void stasis_log_file_pool_prealloc_file(stasis_log_file_pool_state * fp) {
   if(fp->dead_count < fp->dead_threshold) {
-    char * tmpfile = "preallocating~";
+    const char * tmpfile = "preallocating~";
     char * tmpfilepath = build_path(fp->dirname, tmpfile);
-    size_t bufsz = PAGE_SIZE;
+    ssize_t bufsz = PAGE_SIZE;
     pthread_mutex_unlock(&fp->mut);
 #ifdef HAVE_O_DSYNC
     int sync = O_DSYNC; // XXX cut and pasted from above...
@@ -254,7 +254,7 @@ static void stasis_log_file_pool_prealloc_file(stasis_log_file_pool_state * fp)
     printf("Writing zeros to empty log file...\n");
     byte * buffer = stasis_calloc(bufsz, byte);
     for(off_t i = 0; i <= fp->target_chunk_size; i += bufsz) {
-      int ret = pwrite(fd, buffer, bufsz, i);
+      ssize_t ret = pwrite(fd, buffer, bufsz, i);
       if(ret != bufsz) {
         perror("Couldn't write to empty log");
         abort();
@@ -267,7 +267,7 @@ static void stasis_log_file_pool_prealloc_file(stasis_log_file_pool_state * fp)
 
     pthread_mutex_lock(&fp->mut);
     char * filenametmp = stasis_log_file_pool_build_filename(fp, fp->dead_count);
-    char * filename = malloc(strlen(filenametmp) + 2);
+    char * filename = stasis_malloc(strlen(filenametmp) + 2, char);
     strcpy(filename, filenametmp);
     strcat(filename, "~");
     char * newfilepath = build_path(fp->dirname, filename);
@@ -289,7 +289,7 @@ static void stasis_log_file_pool_prealloc_file(stasis_log_file_pool_state * fp)
   }
 }
 static void * stasis_log_file_pool_prealloc_worker(void * fpp) {
-  stasis_log_file_pool_state * fp = fpp;
+  stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)fpp;
 
   pthread_mutex_lock(&fp->mut);
 
@@ -312,7 +312,7 @@ static void * stasis_log_file_pool_prealloc_worker(void * fpp) {
  * Does no latching. Modifies all mutable fields of fp.
 */
 int stasis_log_file_pool_append_chunk(stasis_log_t * log, off_t new_offset) {
-  stasis_log_file_pool_state * fp = log->impl;
+  stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
   char * old_file = 0;
   char * new_file = stasis_log_file_pool_build_filename(fp, new_offset);
   char * new_path = build_path(fp->dirname, new_file);
@@ -350,8 +350,8 @@ int stasis_log_file_pool_append_chunk(stasis_log_t * log, off_t new_offset) {
 */
 LogEntry * stasis_log_file_pool_reserve_entry(stasis_log_t * log, size_t szs) {
   uint32_t sz = szs;
-  stasis_log_file_pool_state * fp = log->impl;
-  lsn_t * handle = pthread_getspecific(fp->handle_key);
+  stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
+  lsn_t * handle = (lsn_t *)pthread_getspecific(fp->handle_key);
   if(!handle) { handle = stasis_alloc(lsn_t); pthread_setspecific(fp->handle_key, handle); }
 
   uint64_t framed_size = sz+sizeof(uint32_t)+sizeof(uint32_t);
@@ -384,8 +384,8 @@ LogEntry * stasis_log_file_pool_reserve_entry(stasis_log_t * log, size_t szs) {
 * Does no latching. Everything is thread local, except the call to ringbuffer.
 */
 int stasis_log_file_pool_write_entry_done(stasis_log_t * log, LogEntry * e) {
-  stasis_log_file_pool_state * fp = log->impl;
-  lsn_t * handle = pthread_getspecific(fp->handle_key);
+  stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
+  lsn_t * handle = (lsn_t *)pthread_getspecific(fp->handle_key);
   assert(handle);
 
   stasis_ringbuffer_reading_writer_done(fp->ring, handle);
@@ -396,8 +396,8 @@ int stasis_log_file_pool_write_entry_done(stasis_log_t * log, LogEntry * e) {
 * ringbuffer), and the call to ringbuffer, everything is thread local.
 */
 int stasis_log_file_pool_write_entry(stasis_log_t * log, LogEntry * e) {
-  stasis_log_file_pool_state * fp = log->impl;
-  lsn_t * handle = pthread_getspecific(fp->handle_key);
+  stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
+  lsn_t * handle = (lsn_t *)pthread_getspecific(fp->handle_key);
   assert(handle);
 
   byte * buf = (byte*)e;
 
@@ -418,7 +418,7 @@
@ -418,7 +418,7 @@ int stasis_log_file_pool_write_entry(stasis_log_t * log, LogEntry * e) {
|
|||
* protected from being closed by truncation.
|
||||
*/
|
||||
const LogEntry* stasis_log_file_pool_chunk_read_entry(stasis_log_file_pool_state * fp, int fd, lsn_t file_offset, lsn_t lsn, uint32_t * len) {
|
||||
int err;
|
||||
ssize_t err;
|
||||
if(sizeof(*len) != (err = mypread(fd, (byte*)len, sizeof(*len), lsn-file_offset))) {
|
||||
if(err == 0) { DEBUG(stderr, "EOF reading len from log\n"); return 0; }
|
||||
abort();
|
||||
|
@ -437,7 +437,7 @@ const LogEntry* stasis_log_file_pool_chunk_read_entry(stasis_log_file_pool_state
|
|||
}
|
||||
}
|
||||
|
||||
byte * buf = malloc(*len + sizeof(uint32_t));
|
||||
byte * buf = stasis_malloc(*len + sizeof(uint32_t), byte);
|
||||
if(!buf) {
|
||||
fprintf(stderr, "Couldn't alloc memory for log entry of size %lld. "
|
||||
"This could be due to corruption at the end of the log. Conservatively bailing out.",
|
||||
|
@ -477,7 +477,7 @@ int stasis_log_file_pool_chunk_write_buffer(int fd, const byte * buf, size_t sz,
|
|||
return 1;
|
||||
}
|
||||
const LogEntry* stasis_log_file_pool_read_entry(struct stasis_log_t* log, lsn_t lsn) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
if(fp->ring) {
|
||||
// Force bytes containing length of log entry to disk.
|
||||
if(stasis_ringbuffer_get_write_frontier(fp->ring) > lsn) {
|
||||
|
@ -526,41 +526,41 @@ lsn_t stasis_log_file_pool_next_entry(struct stasis_log_t* log, const LogEntry *
|
|||
*/
|
||||
lsn_t stasis_log_file_pool_first_unstable_lsn(struct stasis_log_t* log, stasis_log_force_mode_t mode) {
|
||||
// TODO this ignores mode...
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
return stasis_ringbuffer_get_read_tail(fp->ring);
|
||||
}
|
||||
/**
|
||||
* Does no latching. Relies on ringbuffer for synchronization.
|
||||
*/
|
||||
lsn_t stasis_log_file_pool_first_pending_lsn(struct stasis_log_t* log) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
return stasis_ringbuffer_get_write_tail(fp->ring);
|
||||
}
|
||||
/**
|
||||
* Does no latching. Relies on ringbuffer for synchronization.
|
||||
*/
|
||||
void stasis_log_file_pool_force_tail(struct stasis_log_t* log, stasis_log_force_mode_t mode) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
stasis_ringbuffer_flush(fp->ring, stasis_ringbuffer_get_write_frontier(fp->ring));
|
||||
}
|
||||
/**
|
||||
* Does no latching. Relies on ringbuffer for synchronization.
|
||||
*/
|
||||
lsn_t stasis_log_file_pool_next_available_lsn(stasis_log_t *log) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
return stasis_ringbuffer_get_write_frontier(fp->ring);//nextAvailableLSN;
|
||||
}
|
||||
/**
|
||||
* Modifies all fields of fp. Holds latches.
|
||||
*/
|
||||
int stasis_log_file_pool_truncate(struct stasis_log_t* log, lsn_t lsn) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
pthread_mutex_lock(&fp->mut);
|
||||
int chunk = get_chunk_from_offset(log, lsn);
|
||||
int dead_offset = fp->dead_count;
|
||||
fp->dead_filenames = stasis_realloc(fp->dead_filenames, dead_offset + chunk, char*);
|
||||
for(int i = 0; i < chunk; i++) {
|
||||
fp->dead_filenames[dead_offset + i] = malloc(strlen(fp->live_filenames[i]) + 2);
|
||||
fp->dead_filenames[dead_offset + i] = (char*)malloc(strlen(fp->live_filenames[i]) + 2);
|
||||
fp->dead_filenames[dead_offset + i][0] = 0;
|
||||
strcat(fp->dead_filenames[dead_offset + i], fp->live_filenames[i]);
|
||||
strcat(fp->dead_filenames[dead_offset + i], "~");
|
||||
|
@ -592,7 +592,7 @@ int stasis_log_file_pool_truncate(struct stasis_log_t* log, lsn_t lsn) {
|
|||
* Grabs mut so that it can safely read fp->live_offsets[0].
|
||||
*/
|
||||
lsn_t stasis_log_file_pool_truncation_point(struct stasis_log_t* log) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
pthread_mutex_lock(&fp->mut);
|
||||
lsn_t ret = fp->live_offsets[0];
|
||||
pthread_mutex_unlock(&fp->mut);
|
||||
|
@ -605,7 +605,7 @@ lsn_t stasis_log_file_pool_chunk_scrub_to_eof(stasis_log_t * log, int fd, lsn_t
|
|||
lsn_t cur_off = file_off;
|
||||
const LogEntry * e;
|
||||
uint32_t len;
|
||||
while((e = stasis_log_file_pool_chunk_read_entry(log->impl, fd, file_off, cur_off, &len))) {
|
||||
while((e = stasis_log_file_pool_chunk_read_entry((stasis_log_file_pool_state *)log->impl, fd, file_off, cur_off, &len))) {
|
||||
cur_off = log->next_entry(log, e);
|
||||
log->read_entry_done(log, e);
|
||||
}
|
||||
|
@ -616,9 +616,9 @@ lsn_t stasis_log_file_pool_chunk_scrub_to_eof(stasis_log_t * log, int fd, lsn_t
|
|||
* first thing it does is shut down the writeback thread.
|
||||
*/
|
||||
int stasis_log_file_pool_close(stasis_log_t * log) {
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
|
||||
log->force_tail(log, 0); /// xxx use real constant for wal mode..
|
||||
log->force_tail(log, LOG_FORCE_COMMIT);
|
||||
stasis_ringbuffer_shutdown(fp->ring);
|
||||
|
||||
fp->shutdown = 1;
|
||||
|
@ -648,8 +648,8 @@ int stasis_log_file_pool_close(stasis_log_t * log) {
|
|||
}
|
||||
|
||||
void * stasis_log_file_pool_writeback_worker(void * arg) {
|
||||
stasis_log_t * log = arg;
|
||||
stasis_log_file_pool_state * fp = log->impl;
|
||||
stasis_log_t * log = (stasis_log_t *)arg;
|
||||
stasis_log_file_pool_state * fp = (stasis_log_file_pool_state *)log->impl;
|
||||
|
||||
lsn_t handle;
|
||||
lsn_t off;
|
||||
|
|
|
@@ -27,7 +27,7 @@ static lsn_t stasis_log_impl_in_memory_first_pending_lsn(stasis_log_t * log) {
   return INVALID_LSN; // we're running with recovery disabled, so don't bother
 }
 static lsn_t stasis_log_impl_in_memory_next_available_lsn(stasis_log_t * log) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   writelock(impl->flushedLSN_lock,0);
   writelock(impl->globalOffset_lock,0);
   lsn_t ret = impl->nextAvailableLSN;

@@ -37,7 +37,7 @@ static lsn_t stasis_log_impl_in_memory_next_available_lsn(stasis_log_t * log) {
 }

 static int stasis_log_impl_in_memory_write_entry(stasis_log_t * log, LogEntry *e) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   // XXX release these earlier?
   unlock(impl->globalOffset_lock);
   unlock(impl->flushedLSN_lock);

@@ -45,11 +45,11 @@ static int stasis_log_impl_in_memory_write_entry(stasis_log_t * log, LogEntry *e
 }

 LogEntry* stasis_log_impl_in_memory_reserve_entry(struct stasis_log_t* log, size_t sz) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   /** Use calloc since the entry might not be packed in memory;
       otherwise, we'd leak uninitialized bytes to the log. */

-  LogEntry * e = calloc(1,sz);
+  LogEntry * e = (LogEntry*)calloc(1,sz);

   lsn_t bufferOffset;
   int done = 0;

@@ -96,7 +96,7 @@ int stasis_log_impl_in_memory_entry_done(struct stasis_log_t* log, LogEntry* e)

 static lsn_t stasis_log_impl_in_memory_first_unstable_lsn(stasis_log_t* log,
                                                           stasis_log_force_mode_t mode) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   return impl->nextAvailableLSN;
 }

@@ -109,7 +109,7 @@ static lsn_t stasis_log_impl_in_memory_next_entry(stasis_log_t * log, const LogE
 }

 static int stasis_log_impl_in_memory_truncate(stasis_log_t * log, lsn_t lsn) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   writelock(impl->flushedLSN_lock,1);
   writelock(impl->globalOffset_lock,1);

@@ -133,12 +133,12 @@ static int stasis_log_impl_in_memory_truncate(stasis_log_t * log, lsn_t lsn) {
 }

 static lsn_t stasis_log_impl_in_memory_truncation_point(stasis_log_t * log) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   return impl->globalOffset;
 }

 static int stasis_log_impl_in_memory_close(stasis_log_t * log) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   if(impl->buffer) {
     lsn_t firstEmptyOffset = impl->nextAvailableLSN-impl->globalOffset;
     for(lsn_t i = 0; i < firstEmptyOffset; i++) {

@@ -158,7 +158,7 @@ static int stasis_log_impl_in_memory_close(stasis_log_t * log) {

 static const LogEntry * stasis_log_impl_in_memory_read_entry(stasis_log_t* log,
                                                              lsn_t lsn) {
-  stasis_log_impl_in_memory * impl = log->impl;
+  stasis_log_impl_in_memory * impl = (stasis_log_impl_in_memory *)log->impl;
   DEBUG("lsn: %ld\n", lsn);
   readlock(impl->globalOffset_lock, 0);
   if(lsn >= impl->nextAvailableLSN) {

@@ -184,7 +184,7 @@ static lsn_t stasis_log_impl_in_memory_sizeof_internal_entry(stasis_log_t* log,
 }
 static int stasis_log_impl_in_memory_is_durable(stasis_log_t*log) { return 0; }
 static void stasis_log_impl_in_memory_set_truncation(stasis_log_t *log, stasis_truncation_t *trunc) {
-  stasis_log_impl_in_memory *impl = log->impl;
+  stasis_log_impl_in_memory *impl = (stasis_log_impl_in_memory *)log->impl;
   impl->trunc = trunc;
 }

@@ -47,7 +47,7 @@ terms specified in this license.
 #include <assert.h>

 LogEntry * mallocScratchCommonLogEntry(lsn_t LSN, lsn_t prevLSN, int xid, unsigned int type) {
-  LogEntry * ret = calloc(1, sizeof(struct __raw_log_entry));
+  LogEntry * ret = (LogEntry*)calloc(1, sizeof(struct __raw_log_entry));
   ret->LSN = LSN;
   ret->prevLSN = prevLSN;
   ret->xid = xid;

@@ -99,7 +99,7 @@ LogEntry * mallocScratchUpdateLogEntry(lsn_t LSN, lsn_t prevLSN, int xid,
   size_t logentrysize =
     sizeof(struct __raw_log_entry) + sizeof(UpdateLogEntry) + arg_size;

-  LogEntry * ret = calloc(1, logentrysize);
+  LogEntry * ret = (LogEntry*)calloc(1, logentrysize);
   ret->LSN = LSN;
   ret->prevLSN = prevLSN;
   ret->xid = xid;

@@ -128,7 +128,7 @@ size_t stasis_log_reordering_handle_append(stasis_log_reordering_handle_t * h,
   intptr_t idx = (h->cur_off+h->cur_len)%h->max_len;
   h->queue[idx].p = p;
   h->queue[idx].op = op;
-  h->queue[idx].arg = malloc(arg_size);
+  h->queue[idx].arg = stasis_malloc(arg_size, byte);
   memcpy(h->queue[idx].arg,arg,arg_size);
   h->queue[idx].arg_size = arg_size;
   h->cur_len++;

@@ -189,7 +189,7 @@ static LogEntry * readLogEntry(stasis_log_safe_writes_state * sw) {
   if(!size) {
     return NULL;
   }
-  ret = malloc(size);
+  ret = (LogEntry*)malloc(size);

   bytesRead = read(sw->ro_fd, ret, size);

@@ -234,7 +234,7 @@ static LogEntry * readLogEntry(stasis_log_safe_writes_state * sw) {
 }

 static inline int isDurable_LogWriter(stasis_log_t* log) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   return !sw->softcommit;
 }

@@ -253,7 +253,7 @@ static LogEntry* log_crc_dummy_entry(lsn_t lsn) {
 static int writeLogEntryUnlocked(stasis_log_t* log, LogEntry * e, int clearcrc);

 static lsn_t log_crc_entry(stasis_log_t *log) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   LogEntry* e= allocCommonLogEntry(log, -1, -1, INTERNALLOG);
   // TODO Clean this up; it repeats the implementation of entry_done.
   lsn_t ret = e->LSN;

@@ -275,7 +275,7 @@ static lsn_t log_crc_entry(stasis_log_t *log) {
   @param ret a known-valid LSN (which will be returned if the log is empty)
 */
 static inline lsn_t log_crc_next_lsn(stasis_log_t* log, lsn_t ret) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   // Using readLogEntry() bypasses checks to see if we're past the end
   // of the log.
   LogEntry * le;

@@ -322,7 +322,7 @@ static inline lsn_t log_crc_next_lsn(stasis_log_t* log, lsn_t ret) {
 */
 static int writeLogEntryUnlocked(stasis_log_t* log, LogEntry * e, int clearcrc) {

-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;

   const lsn_t size = sizeofLogEntry(log, e);

@@ -383,17 +383,17 @@ static int writeLogEntryUnlocked(stasis_log_t* log, LogEntry * e, int clearcrc)
 }

 static int writeLogEntry_LogWriter(stasis_log_t* log, LogEntry * e) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   int ret = writeLogEntryUnlocked(log, e, 0);
   pthread_mutex_unlock(&sw->write_mutex);
   return ret;
 }

 LogEntry* reserveEntry_LogWriter(struct stasis_log_t* log, size_t sz) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   pthread_mutex_lock(&sw->write_mutex);

-  LogEntry * e = calloc(1, sz);
+  LogEntry * e = (LogEntry*)calloc(1, sz);

   /* Set the log entry's LSN. */
   pthread_mutex_lock(&sw->nextAvailableLSN_mutex);

@@ -405,7 +405,7 @@ LogEntry* reserveEntry_LogWriter(struct stasis_log_t* log, size_t sz) {
 }

 int entryDone_LogWriter(struct stasis_log_t* log, LogEntry* e) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   pthread_mutex_lock(&sw->nextAvailableLSN_mutex);
   stasis_aggregate_min_remove(sw->minPending, &e->LSN);
   pthread_mutex_unlock(&sw->nextAvailableLSN_mutex);

@@ -437,7 +437,7 @@ static void syncLogInternal(stasis_log_safe_writes_state* sw) {

 static void syncLog_LogWriter(stasis_log_t * log,
                               stasis_log_force_mode_t mode) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   lsn_t newFlushedLSN;

   newFlushedLSN = log_crc_entry(log) + sizeof(lsn_t) + sizeofInternalLogEntry_LogWriter(log, 0);

@@ -474,7 +474,7 @@ static void syncLog_LogWriter(stasis_log_t * log,
 }

 static lsn_t nextAvailableLSN_LogWriter(stasis_log_t * log) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   pthread_mutex_lock(&sw->nextAvailableLSN_mutex);
   lsn_t ret = sw->nextAvailableLSN;
   pthread_mutex_unlock(&sw->nextAvailableLSN_mutex);

@@ -483,7 +483,7 @@ static lsn_t nextAvailableLSN_LogWriter(stasis_log_t * log) {

 static lsn_t flushedLSN_LogWriter(stasis_log_t* log,
                                   stasis_log_force_mode_t mode) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   readlock(sw->flushedLSN_latch, 0);
   lsn_t ret;
   if(mode == LOG_FORCE_COMMIT) {

@@ -497,7 +497,7 @@ static lsn_t flushedLSN_LogWriter(stasis_log_t* log,
   return ret;
 }
 static lsn_t firstPendingLSN_LogWriter(stasis_log_t* log) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   pthread_mutex_lock(&sw->nextAvailableLSN_mutex);
   lsn_t * retp = (lsn_t*)stasis_aggregate_min_compute(sw->minPending);
   lsn_t ret = retp ? *retp : sw->nextAvailableLSN;

@@ -513,7 +513,7 @@ static lsn_t flushedLSNInternal(stasis_log_safe_writes_state* sw) {
 }

 static int close_LogWriter(stasis_log_t* log) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;
   /* Get the whole thing to the disk before closing it. */
   syncLog_LogWriter(log, LOG_FORCE_WAL);

@@ -536,7 +536,7 @@ static int close_LogWriter(stasis_log_t* log) {
 }

 static const LogEntry * readLSNEntry_LogWriter(stasis_log_t * log, const lsn_t LSN) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;

   LogEntry * ret;

@@ -613,7 +613,7 @@ void readEntryDone_LogWriter(stasis_log_t *log, const LogEntry *e) {

 */
 static int truncateLog_LogWriter(stasis_log_t* log, lsn_t LSN) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;

   FILE *tmpLog;

@@ -672,7 +672,7 @@ static int truncateLog_LogWriter(stasis_log_t* log, lsn_t LSN) {
     LogEntry *firstCRC = 0;
     // zero out crc of first entry during copy
     if(firstInternalEntry && le->type == INTERNALLOG) {
-      firstCRC = malloc(size);
+      firstCRC = (LogEntry*)malloc(size);
       memcpy(firstCRC, le, size);
       firstCRC->prevLSN = 0;
       le = firstCRC;

@@ -806,7 +806,7 @@ static int truncateLog_LogWriter(stasis_log_t* log, lsn_t LSN) {
 }

 static lsn_t firstLogEntry_LogWriter(stasis_log_t* log) {
-  stasis_log_safe_writes_state* sw = log->impl;
+  stasis_log_safe_writes_state* sw = (stasis_log_safe_writes_state*)log->impl;

   assert(sw->fp);
   pthread_mutex_lock(&sw->read_mutex); // for global offset...

@@ -843,7 +843,7 @@ stasis_log_t* stasis_log_safe_writes_open(const char * filename,
   stasis_log_safe_writes_state * sw = stasis_alloc(stasis_log_safe_writes_state);
   sw->filename = strdup(filename);
   {
-    char * log_scratch_filename = malloc(strlen(sw->filename) + 2);
+    char * log_scratch_filename = stasis_malloc(strlen(sw->filename) + 2, char);
     strcpy(log_scratch_filename, sw->filename);
     strcat(log_scratch_filename, "~");
     sw->scratch_filename = log_scratch_filename;
@@ -1,3 +1,4 @@
+#include <stasis/common.h>
 #include <stasis/operations/arrayList.h>
 #include <stasis/bufferManager.h>
 #include <stasis/transactional.h>

@@ -5,6 +6,8 @@
 #include <assert.h>
 #include <math.h>

+BEGIN_C_DECLS
+
 #define MAX_OFFSET_POSITION 3
 #define FIRST_DATA_PAGE_OFFSET 4

@@ -57,7 +60,7 @@ static int array_list_op_init_header(const LogEntry* e, Page* p) {
   assert(e->update.arg_size == sizeof(array_list_parameter_t));

   const array_list_parameter_t * alp
-    = stasis_log_entry_update_args_cptr(e);
+    = (const array_list_parameter_t *)stasis_log_entry_update_args_cptr(e);

   stasis_page_fixed_initialize_page(p, sizeof(pageid_t),
                                     stasis_page_fixed_records_per_page(sizeof(pageid_t)));

@@ -266,3 +269,4 @@ int TarrayListLength(int xid, recordid rid) {
   releasePage(p);
   return alp.maxOffset+1;
 }
+END_C_DECLS
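
BEGIN_C_DECLS / END_C_DECLS are the usual `extern "C"` guards that keep C symbol names unmangled when a translation unit or header is seen by a C++ compiler; wrapping arrayList.c itself (not just the headers) keeps its definitions' linkage matching their declarations when the whole library is built with g++. The macros presumably come from stasis/common.h along these lines (a sketch, not the verbatim definition):

    #ifdef __cplusplus
    # define BEGIN_C_DECLS extern "C" {
    # define END_C_DECLS   }
    #else
    # define BEGIN_C_DECLS
    # define END_C_DECLS
    #endif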

@@ -146,7 +146,7 @@ int TbtreeLookup(int xid, recordid rid, void * cmp_arg, byte * key, size_t keySi
   Page * p = loadPage(xid, slotrid.page);
   readlock(p->rwlatch, 0);
   slotrid.size = stasis_record_length_read(xid, p, slotrid);
-  btree_leaf_pair * buf = malloc(slotrid.size);
+  btree_leaf_pair * buf = (btree_leaf_pair *)malloc(slotrid.size);
   stasis_record_read(xid, p, slotrid, (byte*)buf);
   *valueSize = slotrid.size - (buf->keylen + sizeof(btree_leaf_pair));
   *value = stasis_malloc(*valueSize, byte);

@@ -174,7 +174,7 @@ int TbtreeInsert(int xid, recordid rid, void *cmp_arg, byte *key, size_t keySize
     stasis_record_compact_slotids(xid, p); // could do better with different api
   }
   size_t sz = sizeof(btree_leaf_pair) + keySize + valueSize;
-  btree_leaf_pair *buf = malloc(sz);
+  btree_leaf_pair *buf = (btree_leaf_pair *)malloc(sz);
   buf->keylen = keySize;
   memcpy(buf+1, key, keySize);
   memcpy(((byte*)(buf+1))+keySize, value, valueSize);

@@ -34,7 +34,7 @@ void stasis_blob_read(int xid, Page * p, recordid rid, byte * buf) {
   pageid_t chunk;
   recordid rawRid = rid;
   rawRid.size = BLOB_SLOT;
-  byte * pbuf = alloca(PAGE_SIZE);
+  byte * pbuf = (byte*)alloca(PAGE_SIZE);
   blob_record_t rec;
   stasis_record_read(xid, p, rawRid, (byte*)&rec);

@@ -116,7 +116,7 @@ static int __ThashInsert(int xid, recordid hashHeader, const byte* key, int keyS
 static int __ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize);

 static int op_linear_hash_insert(const LogEntry* e, Page* p) {
-  const linearHash_remove_arg * args = stasis_log_entry_update_args_cptr(e);
+  const linearHash_remove_arg * args = (const linearHash_remove_arg *)stasis_log_entry_update_args_cptr(e);
   recordid hashHeader = args->hashHeader;
   int keySize = args->keySize;
   int valueSize = args->valueSize;

@@ -131,7 +131,7 @@ static int op_linear_hash_insert(const LogEntry* e, Page* p) {
   return 0;
 }
 static int op_linear_hash_remove(const LogEntry* e, Page* p) {
-  const linearHash_insert_arg * args = stasis_log_entry_update_args_cptr(e);
+  const linearHash_insert_arg * args = (const linearHash_insert_arg *)stasis_log_entry_update_args_cptr(e);
   recordid hashHeader = args->hashHeader;
   int keySize = args->keySize;

@@ -170,7 +170,7 @@ int ThashInsert(int xid, recordid hashHeader, const byte* key, int keySize, cons
   hashHeader.size = sizeof(lladd_hash_header);
   pthread_mutex_lock(&linear_hash_mutex);
   int argSize = sizeof(linearHash_insert_arg)+keySize;
-  linearHash_insert_arg * arg = calloc(1,argSize);
+  linearHash_insert_arg * arg = (linearHash_insert_arg *)calloc(1,argSize);
   arg->hashHeader = hashHeader;
   arg->keySize = keySize;
   memcpy(arg+1, key, keySize);

@@ -243,7 +243,7 @@ int ThashRemove(int xid, recordid hashHeader, const byte * key, int keySize) {
   }

   int argSize = sizeof(linearHash_remove_arg) + keySize + valueSize;
-  linearHash_remove_arg * arg = calloc(1,argSize);
+  linearHash_remove_arg * arg = (linearHash_remove_arg *)calloc(1,argSize);
   arg->hashHeader = hashHeader;
   arg->keySize = keySize;
   arg->valueSize = valueSize;

@@ -469,7 +469,7 @@ lladdIterator_t * ThashGenericIterator(int xid, recordid hash) {
 }

 static void linearHashNTAIterator_close(int xid, void * impl) {
-  lladd_linearHashNTA_generic_it * it = impl;
+  lladd_linearHashNTA_generic_it * it = (lladd_linearHashNTA_generic_it *)impl;

   ThashDone(xid, it->hit);

@@ -483,7 +483,7 @@ static void linearHashNTAIterator_close(int xid, void * impl) {
 }

 static int linearHashNTAIterator_next (int xid, void * impl) {
-  lladd_linearHashNTA_generic_it * it = impl;
+  lladd_linearHashNTA_generic_it * it = (lladd_linearHashNTA_generic_it *)impl;

   if(it->lastKey) {
     free(it->lastKey);

@@ -497,7 +497,7 @@ static int linearHashNTAIterator_next (int xid, void * impl) {
 }

 static int linearHashNTAIterator_key(int xid, void * impl, byte ** key) {
-  lladd_linearHashNTA_generic_it * it = impl;
+  lladd_linearHashNTA_generic_it * it = (lladd_linearHashNTA_generic_it *)impl;

   *key = it->lastKey;

@@ -505,7 +505,7 @@ static int linearHashNTAIterator_key(int xid, void * impl, byte ** key) {
 }

 static int linearHashNTAIterator_value(int xid, void * impl, byte ** value) {
-  lladd_linearHashNTA_generic_it * it = impl;
+  lladd_linearHashNTA_generic_it * it = (lladd_linearHashNTA_generic_it *)impl;

   *value = it->lastValue;

@@ -60,7 +60,7 @@ typedef struct {

 static int op_linked_list_nta_insert(const LogEntry* e, Page* p) {
   assert(!p);
-  const stasis_linked_list_remove_log * log = stasis_log_entry_update_args_cptr(e);;
+  const stasis_linked_list_remove_log * log = (const stasis_linked_list_remove_log *)stasis_log_entry_update_args_cptr(e);;

   byte * key;
   byte * value;

@@ -82,7 +82,7 @@ static int op_linked_list_nta_insert(const LogEntry* e, Page* p) {
 }
 static int op_linked_list_nta_remove(const LogEntry *e, Page* p) {
   assert(!p);
-  const stasis_linked_list_remove_log * log = stasis_log_entry_update_args_cptr(e);
+  const stasis_linked_list_remove_log * log = (const stasis_linked_list_remove_log *)stasis_log_entry_update_args_cptr(e);

   byte * key;
   int keySize;

@@ -170,7 +170,7 @@ static void stasis_linked_list_insert_helper(int xid, recordid list, const byte

 int TlinkedListFind(int xid, recordid list, const byte * key, int keySize, byte ** value) {

-  stasis_linkedList_entry * entry = malloc(list.size);
+  stasis_linkedList_entry * entry = (stasis_linkedList_entry *)malloc(list.size);

   pthread_mutex_lock(&stasis_linked_list_mutex);
   Tread(xid, list, entry);

@@ -224,7 +224,7 @@ int TlinkedListRemove(int xid, recordid list, const byte * key, int keySize) {
   }

   int entrySize = sizeof(stasis_linked_list_remove_log) + keySize + valueSize;
-  stasis_linked_list_remove_log * undoLog = malloc(entrySize);
+  stasis_linked_list_remove_log * undoLog = (stasis_linked_list_remove_log *)malloc(entrySize);

   undoLog->list = list;
   undoLog->keySize = keySize;

@@ -247,7 +247,7 @@ int TlinkedListRemove(int xid, recordid list, const byte * key, int keySize) {
 }

 static int stasis_linked_list_remove_helper(int xid, recordid list, const byte * key, int keySize) {
-  stasis_linkedList_entry * entry = malloc(list.size);
+  stasis_linkedList_entry * entry = (stasis_linkedList_entry *)malloc(list.size);
   pthread_mutex_lock(&stasis_linked_list_mutex);

   Tread(xid, list, entry);

@@ -273,14 +273,14 @@ static int stasis_linked_list_remove_helper(int xid, recordid list, const byte *
         Tset(xid, lastRead, entry);
       } else {
         assert(entry->next.size == list.size); // Otherwise, something strange is happening, or the list contains entries with variable sizes.
-        stasis_linkedList_entry * entry2 = malloc(list.size);
+        stasis_linkedList_entry * entry2 = (stasis_linkedList_entry *)malloc(list.size);
         Tread(xid, entry->next, entry2);
         Tdealloc(xid, entry->next); // could break iterator, since it writes one entry ahead.
         Tset(xid, lastRead, entry2);
         free(entry2);
       }
     } else {
-      stasis_linkedList_entry * entry2 = malloc(list.size);
+      stasis_linkedList_entry * entry2 = (stasis_linkedList_entry *)malloc(list.size);
       assert(oldLastRead.size != -2);
       Tread(xid, oldLastRead, entry2);
       memcpy(&(entry2->next), &(entry->next), sizeof(recordid));

@@ -336,7 +336,7 @@ recordid TlinkedListCreate(int xid, int keySize, int valueSize) {
   return ret;
 }
 void TlinkedListDelete(int xid, recordid list) {
-  stasis_linkedList_entry * entry = malloc(list.size);
+  stasis_linkedList_entry * entry = (stasis_linkedList_entry *)malloc(list.size);

   Tread(xid, list, entry);
   Tdealloc(xid, list);

@@ -382,7 +382,7 @@ int TlinkedListNext(int xid, stasis_linkedList_iterator * it, byte ** key, int *
   if(it->first == -1) {
     it->first = 1;
   } else if(it->first) {
-    entry = malloc(it->next.size);
+    entry = (stasis_linkedList_entry *)malloc(it->next.size);
     Tread(xid, it->listRoot, entry);
     int listTouched;
     listTouched = memcmp(&(entry->next), &(it->next), sizeof(recordid));

@@ -405,7 +405,7 @@ int TlinkedListNext(int xid, stasis_linkedList_iterator * it, byte ** key, int *
   }

   assert(it->keySize + it->valueSize + sizeof(stasis_linkedList_entry) == it->next.size);
-  entry = malloc(it->next.size);
+  entry = (stasis_linkedList_entry *)malloc(it->next.size);
   Tread(xid, it->next, entry);

   if(entry->next.size) {

@@ -10,7 +10,7 @@ static int op_lsn_free_set(const LogEntry *e, Page *p) {
   assert(e->update.arg_size >= (sizeof(pageoff_t) * 2));
   int size = e->update.arg_size;
   size -= (2*sizeof(pageoff_t));
-  const pageoff_t * a = stasis_log_entry_update_args_cptr(e);
+  const pageoff_t * a = (const pageoff_t *)stasis_log_entry_update_args_cptr(e);
   const byte* b = (const byte*)&(a[2]);
   assertlocked(p->rwlatch);
   memcpy(p->memAddr + a[0], b, a[1]);

@@ -21,7 +21,7 @@ static int op_lsn_free_unset(const LogEntry *e, Page *p) {
   assert(e->update.arg_size >= (sizeof(pageoff_t) * 2));
   int size = e->update.arg_size;
   size -= (2*sizeof(pageoff_t));
-  const pageoff_t * a = stasis_log_entry_update_args_cptr(e);
+  const pageoff_t * a = (const pageoff_t *)stasis_log_entry_update_args_cptr(e);
   const byte* b = (const byte*)&(a[2]);
   assertlocked(p->rwlatch);
   memcpy(p->memAddr + a[0], b+a[1], a[1]);

@@ -77,7 +77,7 @@ static void expand(int xid, recordid hash, int next_split, int i, int keySize, i
 #define AMORTIZE 1000
 #define FF_AM 750
   if(count <= 0 && !(count * -1) % FF_AM) {
-    recordid * headerRidB = pblHtLookup(openHashes, &(hash.page), sizeof(hash.page));
+    recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hash.page), sizeof(hash.page));
     int j;
     TarrayListExtend(xid, hash, AMORTIZE);
     for(j = 0; j < AMORTIZE; j++) {

@@ -95,7 +95,7 @@ static void expand(int xid, recordid hash, int next_split, int i, int keySize, i
 }

 static void update_hash_header(int xid, recordid hash, pageid_t i, pageid_t next_split) {
-  hashEntry * he = pblHtLookup(openHashes, &(hash.page), sizeof(hash.page));
+  hashEntry * he = (hashEntry *)pblHtLookup(openHashes, &(hash.page), sizeof(hash.page));
   assert(he);
   recordid * headerRidB = &he->next;

@@ -284,8 +284,7 @@ static int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry
                             void * key, int keySize, int valSize, recordid * deletedEntry) {
   if(bucket_contents->next.size == 0) { return 0; }

-  recordid this = hash;
-  this.slot = bucket_number;
+  hash.slot = bucket_number;

   int found = 0;
   if(!memcmp(bucket_contents+1, key, keySize)) {

@@ -293,12 +292,12 @@ static int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry
     deletedEntry->size = 0; /* size = 0 -> don't delete (this is a bucket!) */
     if(bucket_contents->next.size == -1) {
       memset(bucket_contents, 0, sizeof(hashEntry) + keySize + valSize);
-      Tset(xid, this, bucket_contents);
+      Tset(xid, hash, bucket_contents);
     } else {
       assert(bucket_contents->next.size == sizeof(hashEntry) + keySize + valSize);
       recordid oldNext = bucket_contents->next;
       Tread(xid, bucket_contents->next, bucket_contents);
-      Tset(xid, this, bucket_contents);
+      Tset(xid, hash, bucket_contents);
       *deletedEntry = oldNext; /* @todo delete from bucket really should do its own deallocation.. */
     }
     return 1;

@@ -312,7 +311,7 @@ static int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry
   recordid Aaddr, Baddr;

   memcpy(B, bucket_contents, sizeof(hashEntry) + keySize + valSize);
-  Baddr = this;
+  Baddr = hash;
   while(B->next.size != -1) {
     hashEntry * tmp = A;
     A = B;
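
The rename in deleteFromBucket() is not just cleanup: `this` is a reserved keyword in C++, so `recordid this = hash;` is a hard parse error under g++. Since recordid is passed by value, the commit simply edits the `hash` parameter's local copy instead of introducing a new name. Illustrative sketch with simplified, hypothetical types:

    typedef struct { int page; int slot; long size; } recordid; /* simplified */

    void demo(recordid hash, int bucket_number) {
      /* recordid this = hash;   -- fine in C, a syntax error in C++ */
      hash.slot = bucket_number; /* mutating the by-value parameter is safe */
    }

    int main(void) { recordid r = {0, 0, 0}; demo(r, 1); return 0; }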

@@ -402,7 +401,7 @@ void TnaiveHashInsert(int xid, recordid hashRid,
                       void * key, int keySize,
                       void * val, int valSize) {

-  recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
+  recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));

   int bucket =
     2 + stasis_linear_hash(key, keySize, headerHashBits, headerNextSplit - 2);

@@ -426,7 +425,7 @@ void TnaiveHashInsert(int xid, recordid hashRid,
     so that expand can be selectively called. */
 int TnaiveHashDelete(int xid, recordid hashRid,
                      void * key, int keySize, int valSize) {
-  recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
+  recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));

   int bucket_number = stasis_linear_hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
   recordid deleteMe;

@@ -464,14 +463,14 @@ void TnaiveHashUpdate(int xid, recordid hashRid, void * key, int keySize, void *


 int TnaiveHashClose(int xid, recordid hashRid) {
-  recordid * freeMe = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
+  recordid * freeMe = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
   pblHtRemove(openHashes, &(hashRid.page), sizeof(hashRid.page));
   free(freeMe);
   return 0;
 }

 int TnaiveHashLookup(int xid, recordid hashRid, void * key, int keySize, void * buf, int valSize) {
-  recordid * headerRidB = pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
+  recordid * headerRidB = (recordid *)pblHtLookup(openHashes, &(hashRid.page), sizeof(hashRid.page));
   int bucket_number = stasis_linear_hash(key, keySize, headerHashBits, headerNextSplit - 2) + 2;
   int ret = findInBucket(xid, hashRid, bucket_number, key, keySize, buf, valSize);
   return ret;

@@ -189,7 +189,7 @@ void TinitializeFixedPage(int xid, pageid_t page, int slotLength) {

 static int op_initialize_page(const LogEntry* e, Page* p) {
   assert(e->update.arg_size == sizeof(page_init_arg));
-  const page_init_arg* arg = stasis_log_entry_update_args_cptr(e);
+  const page_init_arg* arg = (const page_init_arg*)stasis_log_entry_update_args_cptr(e);

   switch(arg->slot) {
   case SLOTTED_PAGE:

@@ -213,7 +213,7 @@ typedef struct {
 } init_multipage_arg;

 static int op_init_multipage_impl(const LogEntry *e, Page *ignored) {
-  const init_multipage_arg* arg = stasis_log_entry_update_args_cptr(e);
+  const init_multipage_arg* arg = (const init_multipage_arg*)stasis_log_entry_update_args_cptr(e);

   for(pageid_t i = 0; i < arg->numPages; i++) {
     Page * p = loadPage(e->xid, arg->firstPage + i);

@@ -90,7 +90,7 @@ void * getPrepareGuardState(void) {


 int prepareGuard(const LogEntry * e, void * state) {
-  PrepareGuardState * pgs = state;
+  PrepareGuardState * pgs = (PrepareGuardState *)state;
   int ret = pgs->continueIterating;
   if(e->type == UPDATELOG && !pgs->aborted) {
     if(e->update.funcID == OPERATION_PREPARE) {

@@ -113,7 +113,7 @@ int prepareGuard(const LogEntry * e, void * state) {
 /** @todo When fleshing out the logHandle's prepareAction interface,
     figure out what the return value should mean... */
 int prepareAction(void * state) {
-  PrepareGuardState * pgs = state;
+  PrepareGuardState * pgs = (PrepareGuardState *)state;
   int ret;
   if(!(pgs->continueIterating || pgs->aborted)) {
     //assert(pgs->prevLSN != -1);

@@ -27,7 +27,7 @@ static int alloc_boundary_tag(int xid, Page *p, const boundary_tag *arg) {
   byte *buf = stasis_record_write_begin(xid, p, rid);
   memcpy(buf, arg, sizeof(boundary_tag));
   stasis_record_write_done(xid, p, rid, buf);
-  stasis_dirty_page_table_set_dirty(stasis_runtime_dirty_page_table(), p);
+  stasis_dirty_page_table_set_dirty((stasis_dirty_page_table_t*)stasis_runtime_dirty_page_table(), p);
   return 0;
 }

@@ -36,14 +36,14 @@ static int alloc_boundary_tag(int xid, Page *p, const boundary_tag *arg) {
    top action's logical undo grabs the necessary latches.
 */
 static int op_alloc_boundary_tag(const LogEntry* e, Page* p) {
-  return alloc_boundary_tag(e->xid, p, stasis_log_entry_update_args_cptr(e));
+  return alloc_boundary_tag(e->xid, p, (const boundary_tag*)stasis_log_entry_update_args_cptr(e));
 }

 static int op_alloc_region(const LogEntry *e, Page* p) {
   pthread_mutex_lock(&region_mutex);
   assert(0 == holding_mutex);
   holding_mutex = pthread_self();
-  const regionAllocArg *dat = stasis_log_entry_update_args_cptr(e);
+  const regionAllocArg *dat = (const regionAllocArg *)stasis_log_entry_update_args_cptr(e);
   TregionAllocHelper(e->xid, dat->startPage, dat->pageCount, dat->allocationManager);
   holding_mutex = 0;
   pthread_mutex_unlock(&region_mutex);

@@ -75,7 +75,7 @@ static int op_dealloc_region(const LogEntry* e, Page* p) {
   assert(0 == holding_mutex);
   holding_mutex = pthread_self();

-  ret = operate_dealloc_region_unlocked(e->xid, stasis_log_entry_update_args_cptr(e));
+  ret = operate_dealloc_region_unlocked(e->xid, (const regionAllocArg*)stasis_log_entry_update_args_cptr(e));

   holding_mutex = 0;
   pthread_mutex_unlock(&region_mutex);

@@ -502,13 +502,13 @@ pageid_t TregionSize(int xid, pageid_t firstPage) {
 void TregionForce(int xid, stasis_buffer_manager_t * bm, stasis_buffer_manager_handle_t * h, pageid_t firstPage) {
   pageid_t endOfRange = firstPage + TregionSize(xid, firstPage);
   stasis_dirty_page_table_flush_range(
-      stasis_runtime_dirty_page_table(),
+      (stasis_dirty_page_table_t*)stasis_runtime_dirty_page_table(),
       firstPage, endOfRange);
-  bm = bm ? bm : stasis_runtime_buffer_manager();
+  bm = bm ? bm : (stasis_buffer_manager_t*)stasis_runtime_buffer_manager();
   bm->forcePageRange(bm, h, firstPage, endOfRange);
 }
 void TregionPrefetch(int xid, pageid_t firstPage) {
-  stasis_buffer_manager_t * bm = stasis_runtime_buffer_manager();
+  stasis_buffer_manager_t * bm = (stasis_buffer_manager_t*)stasis_runtime_buffer_manager();
   pageid_t endOfRange = firstPage + TregionSize(xid, firstPage);
   bm->prefetchPages(bm, firstPage, endOfRange);
 }

@@ -112,7 +112,7 @@ ssize_t Tpwrite(int xid, const byte * buf, size_t count, off_t offset) {
 static int op_segment_file_pwrite(const LogEntry* e, Page* p) {
   assert(p == 0);
   size_t count = (e->update.arg_size - sizeof(segment_file_arg_t)) / 2;
-  const segment_file_arg_t * arg = stasis_log_entry_update_args_cptr(e);
+  const segment_file_arg_t * arg = (const segment_file_arg_t *)stasis_log_entry_update_args_cptr(e);
   off_t offset = arg->offset;
   read_write_helper(0, e->xid, e->LSN, (byte*)(arg+1), count, offset);
   return 0;

@@ -121,7 +121,7 @@ static int op_segment_file_pwrite(const LogEntry* e, Page* p) {
 static int op_segment_file_pwrite_inverse(const LogEntry* e, Page* p) {
   assert(p == 0);
   size_t count = (e->update.arg_size - sizeof(segment_file_arg_t)) / 2;
-  const segment_file_arg_t * arg = stasis_log_entry_update_args_cptr(e);
+  const segment_file_arg_t * arg = (const segment_file_arg_t *)stasis_log_entry_update_args_cptr(e);
   off_t offset = arg->offset;
   read_write_helper(0, e->xid, e->LSN, ((byte*)(arg+1))+count, count, offset);
   return 0;

@@ -52,7 +52,7 @@ terms specified in this license.
 #include <assert.h>
 static int op_set(const LogEntry *e, Page *p) {
   assert(e->update.arg_size >= sizeof(slotid_t) + sizeof(int64_t));
-  const byte * b = stasis_log_entry_update_args_cptr(e);
+  const byte * b = (const byte *)stasis_log_entry_update_args_cptr(e);
   recordid rid;

   rid.page = p->id;

@@ -69,7 +69,7 @@ static int op_set(const LogEntry *e, Page *p) {
 }
 static int op_set_inverse(const LogEntry *e, Page *p) {
   assert(e->update.arg_size >= sizeof(slotid_t) + sizeof(int64_t));
-  const byte * b = stasis_log_entry_update_args_cptr(e);
+  const byte * b = (const byte *)stasis_log_entry_update_args_cptr(e);
   recordid rid;

   rid.page = p->id;

@@ -101,7 +101,7 @@ Page * TsetWithPage(int xid, recordid rid, Page *p, const void * dat) {
   short type = stasis_record_type_read(xid,p,rid);

   if(type == BLOB_SLOT) {
-    stasis_blob_write(xid,p,rid,dat);
+    stasis_blob_write(xid,p,rid,(const byte*)dat);
     unlock(p->rwlatch);
   } else {
     rid.size = stasis_record_type_to_size(rid.size);

@@ -153,7 +153,7 @@ static int op_set_range(const LogEntry* e, Page* p) {
   int diffLength = e->update.arg_size - sizeof(set_range_t);
   assert(! (diffLength % 2));
   diffLength >>= 1;
-  const set_range_t * range = stasis_log_entry_update_args_cptr(e);
+  const set_range_t * range = (const set_range_t *)stasis_log_entry_update_args_cptr(e);
   recordid rid;
   rid.page = p->id;
   rid.slot = range->slot;

@@ -175,7 +175,7 @@ static int op_set_range_inverse(const LogEntry* e, Page* p) {
   int diffLength = e->update.arg_size - sizeof(set_range_t);
   assert(! (diffLength % 2));
   diffLength >>= 1;
-  const set_range_t * range = stasis_log_entry_update_args_cptr(e);
+  const set_range_t * range = (const set_range_t *)stasis_log_entry_update_args_cptr(e);
   recordid rid;
   rid.page = p->id;
   rid.slot = range->slot;

@@ -5,7 +5,7 @@

 #include <assert.h>

-//-------------- New API below this line
+BEGIN_C_DECLS

 static inline void stasis_page_fixed_checkRid(Page * page, recordid rid) {
   assert(page->pageType); // any more specific breaks pages based on this one

@@ -158,3 +158,5 @@ page_impl stasis_page_array_list_impl(void) {

 void stasis_page_fixed_init(void) { }
 void stasis_page_fixed_deinit(void) { }
+
+END_C_DECLS

@@ -83,7 +83,7 @@ static inline lsn_t freespace(stasis_ringbuffer_t * ring) {
 }

 // Does not need any synchronization (all fields are read only)
-static inline void* ptr_off(stasis_ringbuffer_t * ring, lsn_t off) {
+static inline byte* ptr_off(stasis_ringbuffer_t * ring, lsn_t off) {
   return ring->mem + (off & ring->mask);
 }

@@ -174,16 +174,16 @@ lsn_t stasis_ringbuffer_consume_bytes(stasis_ringbuffer_t * ring, lsn_t* sz, lsn
   return ret;
 }
 // Not threadsafe.
-const void * stasis_ringbuffer_nb_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz) {
+const byte * stasis_ringbuffer_nb_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz) {
   lsn_t off2 = stasis_ringbuffer_nb_consume_bytes(ring, off, &sz);
-  if(off2 != off) { if(off != RING_NEXT || (off2 < 0 && off2 > RING_MINERR)) { return (const void*) (intptr_t)off2; } }
+  if(off2 != off) { if(off != RING_NEXT || (off2 < 0 && off2 > RING_MINERR)) { return (const byte*) (intptr_t)off2; } }
   assert(! (off2 < 0 && off2 >= RING_MINERR));
   return ptr_off(ring, off2);
 }
 // Explicit synchronization (blocks).
-const void * stasis_ringbuffer_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz) {
+const byte * stasis_ringbuffer_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz) {
   pthread_mutex_lock(&ring->mut);
-  const void * ret;
+  const byte * ret;
   assert(sz != RING_NEXT);
   while(((const void*)RING_VOLATILE) == (ret = stasis_ringbuffer_nb_get_rd_buf(ring, off, sz))) {
     pthread_cond_wait(&ring->write_done, &ring->mut);

@@ -192,7 +192,7 @@ const void * stasis_ringbuffer_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off,
   return ret;
 }
 // No need for synchronization (only touches read-only-fields)
-void * stasis_ringbuffer_get_wr_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz) {
+byte * stasis_ringbuffer_get_wr_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz) {
   return ptr_off(ring, off);
 }
 void stasis_ringbuffer_nb_advance_write_tail(stasis_ringbuffer_t * ring, lsn_t off) {
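
The void*-to-byte* switch here matters because ptr_off() does pointer arithmetic: `ring->mem + (off & ring->mask)` is a GNU C extension when mem is a void* and an outright error in standard C++. With byte (presumably `typedef unsigned char byte;` in Stasis' headers), the arithmetic is well defined in both languages. The idiom in isolation, under those assumptions:

    #include <stdint.h>

    typedef unsigned char byte; /* assumed to match Stasis' typedef */

    /* Index into a power-of-two-sized ring; byte* arithmetic is
     * legal in both C and C++, unlike arithmetic on void*. */
    static inline byte *ptr_off(byte *mem, uint64_t mask, uint64_t off) {
      return mem + (off & mask);
    }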
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
|
||||
#include <stasis/common.h>
|
||||
|
||||
BEGIN_C_DECLS
|
||||
|
||||
struct stasis_allocation_policy_t;
|
||||
typedef struct stasis_allocation_policy_t stasis_allocation_policy_t;
|
||||
|
||||
|
@ -29,4 +31,6 @@ void stasis_allocation_policy_alloced_from_page(stasis_allocation_policy_t * ap,
|
|||
@return true if the allocation would be safe. false if not sure.
|
||||
*/
|
||||
int stasis_allocation_policy_can_xid_alloc_from_page(stasis_allocation_policy_t * ap, int xid, pageid_t page);
|
||||
|
||||
END_C_DECLS
|
||||
#endif // ALLOCATION_POLICY_H
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
#ifndef CONCURRENTBUFFERMANAGER_H_
|
||||
#define CONCURRENTBUFFERMANAGER_H_
|
||||
#include <stasis/bufferManager.h>
|
||||
BEGIN_C_DECLS
|
||||
stasis_buffer_manager_t* stasis_buffer_manager_concurrent_hash_factory(stasis_log_t *log, stasis_dirty_page_table_t *dpt);
|
||||
stasis_buffer_manager_t* stasis_buffer_manager_concurrent_hash_open(stasis_page_handle_t * h, stasis_log_t * log, stasis_dirty_page_table_t * dpt);
|
||||
END_C_DECLS
|
||||
#endif /* CONCURRENTBUFFERMANAGER_H_ */
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#ifndef STASIS_BUFFERMANAGER_LEGACY_LEGACYBUFFERMANAGER_H
|
||||
#define STASIS_BUFFERMANAGER_LEGACY_LEGACYBUFFERMANAGER_H
|
||||
#include <stasis/pageHandle.h>
|
||||
BEGIN_C_DECLS
|
||||
stasis_buffer_manager_t* stasis_buffer_manager_deprecated_open(stasis_page_handle_t * ph);
|
||||
stasis_buffer_manager_t* stasis_buffer_manager_deprecated_factory(stasis_log_t *log, stasis_dirty_page_table_t *dpt);
|
||||
END_C_DECLS
|
||||
#endif//STASIS_BUFFERMANAGER_LEGACY_LEGACYBUFFERMANAGER_H
|
||||
|
|
|
@ -4,8 +4,8 @@
|
|||
|
||||
#include <stasis/pageHandle.h>
|
||||
#include <stasis/logger/logger2.h>
|
||||
|
||||
BEGIN_C_DECLS
|
||||
stasis_page_handle_t* openPageFile(stasis_log_t * log, stasis_dirty_page_table_t * dirtyPages);
|
||||
stasis_page_handle_t* stasis_page_handle_deprecated_factory(stasis_log_t *log, stasis_dirty_page_table_t *dpt);
|
||||
|
||||
END_C_DECLS
|
||||
#endif /* __PAGE_FILE_H */
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
#ifndef STASIS_PAGE_ARRAY_H
|
||||
#define STASIS_PAGE_ARRAY_H
|
||||
#include <stasis/bufferManager.h>
|
||||
BEGIN_C_DECLS
|
||||
stasis_buffer_manager_t* stasis_buffer_manager_mem_array_open();
|
||||
stasis_buffer_manager_t* stasis_buffer_manager_mem_array_factory(stasis_log_t * log, stasis_dirty_page_table_t *dpt);
|
||||
END_C_DECLS
|
||||
#endif // STASIS_PAGE_ARRAY_H
|
||||
|
|
|
@ -5,6 +5,9 @@
|
|||
#include <stasis/dirtyPageTable.h>
|
||||
#include <stasis/io/handle.h>
|
||||
#include <stasis/pageHandle.h>
|
||||
|
||||
BEGIN_C_DECLS
|
||||
|
||||
/**
|
||||
This is the type of log that is being used.
|
||||
|
||||
|
@ -232,4 +235,7 @@ extern lsn_t stasis_log_file_write_buffer_size;
|
|||
@todo Stasis' segment implementation is a work in progress; therefore this is set to zero by default.
|
||||
*/
|
||||
extern int stasis_segments_enabled;
|
||||
|
||||
END_C_DECLS
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
#include <stasis/common.h>
|
||||
BEGIN_C_DECLS
|
||||
|
||||
/** @file */
|
||||
typedef struct range {
|
||||
|
@ -48,3 +50,4 @@ static inline long rangeTrackerRoundDown(long x, long quant) {
|
|||
static inline long rangeTrackerRoundUp(long x, long quant) {
|
||||
return (((x-1) / quant) + 1) * quant;
|
||||
}
|
||||
END_C_DECLS
|
||||
|
|
|
@ -2,9 +2,10 @@
|
|||
#define __INMEMORYLOG
|
||||
|
||||
#include <stasis/logger/logger2.h>
|
||||
BEGIN_C_DECLS
|
||||
/**
|
||||
* Allocate a new non-persistent Stasis log.
|
||||
*/
|
||||
stasis_log_t* stasis_log_impl_in_memory_open();
|
||||
|
||||
END_C_DECLS
|
||||
#endif
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
|
||||
#include <stasis/operations.h>
|
||||
#include <stasis/allocationPolicy.h>
|
||||
BEGIN_C_DECLS
|
||||
stasis_operation_impl stasis_op_impl_alloc();
|
||||
stasis_operation_impl stasis_op_impl_dealloc();
|
||||
stasis_operation_impl stasis_op_impl_realloc();
|
||||
|
@ -70,5 +71,5 @@ int TrecordSize(int xid, recordid rid);
|
|||
|
||||
/** Return the number of records stored in page pageid */
|
||||
int TrecordsInPage(int xid, pageid_t page);
|
||||
|
||||
END_C_DECLS
|
||||
#endif
|
||||
|
|
|
@ -75,7 +75,9 @@ terms specified in this license.
|
|||
/** @{ */
|
||||
#ifndef __ARRAY_LIST_H
|
||||
#define __ARRAY_LIST_H
|
||||
#include <stasis/common.h>
|
||||
#include <stasis/operations.h>
|
||||
BEGIN_C_DECLS
|
||||
/** Allocate a new array list.
|
||||
|
||||
@param xid The transaction allocating the new arrayList.
|
||||
|
@ -117,4 +119,5 @@ recordid stasis_array_list_dereference_recordid(int xid, Page * p, int offset);
|
|||
|
||||
stasis_operation_impl stasis_op_impl_array_list_header_init();
|
||||
/** @} */
|
||||
END_C_DECLS
|
||||
#endif
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
|
||||
#include <stasis/operations.h>
|
||||
|
||||
BEGIN_C_DECLS
|
||||
|
||||
typedef int(*stasis_comparator_t)(const void*, size_t, const void*, size_t, void*);
|
||||
typedef int16_t stasis_comparator_id_t;
|
||||
|
||||
|
@ -19,4 +21,6 @@ recordid TbtreeCreate(int xid, stasis_comparator_id_t cmp_id);
|
|||
int TbtreeLookup(int xid, recordid rid, void * cmp_arg, byte * key, size_t keySize, byte ** value, size_t* valueSize);
|
||||
int TbtreeInsert(int xid, recordid rid, void *cmp_arg, byte *key, size_t keySize, byte *value, size_t valueSize);
|
||||
|
||||
END_C_DECLS
|
||||
|
||||
#endif /* BTREE_H_ */
|
||||
|
|
|
@ -59,6 +59,8 @@ terms specified in this license.
|
|||
#define __PAGE_OPERATIONS_H__
|
||||
#include <stasis/operations.h>
|
||||
|
||||
BEGIN_C_DECLS
|
||||
|
||||
pageid_t TpageAlloc(int xid);
|
||||
pageid_t TfixedPageAlloc(int xid, int size);
|
||||
pageid_t TpageAllocMany(int xid, int count);
|
||||
|
@ -107,4 +109,6 @@ stasis_operation_impl stasis_op_impl_fixed_page_alloc();
|
|||
|
||||
void pageOperationsInit(stasis_log_t *log);
|
||||
|
||||
END_C_DECLS
|
||||
|
||||
#endif
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
#ifndef __FIXED_H
|
||||
#define __FIXED_H
|
||||
|
||||
BEGIN_C_DECLS
|
||||
|
||||
void stasis_page_fixed_init();
|
||||
void stasis_page_fixed_deinit();
|
||||
|
||||
|
@ -11,4 +13,7 @@ recordid stasis_page_fixed_next_record(int xid, Page *p, recordid rid);
|
|||
|
||||
page_impl stasis_page_fixed_impl();
|
||||
page_impl stasis_page_array_list_impl();
|
||||
|
||||
END_C_DECLS
|
||||
|
||||
#endif
|
||||
|
|
|
@ -12,6 +12,9 @@
|
|||
extension.
|
||||
*/
|
||||
|
||||
#include <stasis/common.h>
|
||||
BEGIN_C_DECLS
|
||||
|
||||
typedef struct replacementPolicy {
|
||||
/** Factory method */
|
||||
struct replacementPolicy* (*init)();
|
||||
|
@ -47,3 +50,5 @@ replacementPolicy * lruFastInit();
|
|||
replacementPolicy* replacementPolicyThreadsafeWrapperInit(replacementPolicy* rp);
|
||||
replacementPolicy* replacementPolicyConcurrentWrapperInit(replacementPolicy** rp, int count);
|
||||
replacementPolicy* replacementPolicyClockInit(Page * pageArray, int page_count);
|
||||
|
||||
END_C_DECLS
|
||||
|
|
|
@@ -10,6 +10,8 @@
 
 #include <stasis/common.h>
 
+BEGIN_C_DECLS
+
 typedef int (*stasis_transaction_table_callback_t)(int, void*);
 
 typedef struct stasis_transaction_table_callback_list_t stasis_transaction_table_callback_list_t;

@@ -87,4 +89,6 @@ int stasis_transaction_table_invoke_callbacks(stasis_transaction_table_t *tbl,
 int stasis_transaction_table_set_argument(stasis_transaction_table_t *tbl, int xid, int callback_id,
                                           stasis_transaction_table_callback_type_t type, void *arg);
 
+END_C_DECLS
+
 #endif /* TRANSACTIONTABLE_H_ */

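As an illustration of the callback typedef above (all names here are invented):

#include <stdio.h>

/* Hypothetical callback matching stasis_transaction_table_callback_t:
 * receives an xid plus an opaque argument and returns a status code.
 * The meaning of the return value is not shown in this hunk. */
static int example_xact_callback(int xid, void *arg) {
  (void)arg;
  printf("callback fired for xid %d\n", xid);
  return 0;
}
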
@@ -27,6 +27,8 @@
 #define CONCURRENTHASH_H_
 #include <stasis/common.h>
 
+BEGIN_C_DECLS
+
 typedef struct hashtable_t hashtable_t;
 typedef struct bucket_t bucket_t;

@@ -62,5 +64,6 @@ void hashtable_unlock(hashtable_bucket_handle_t *h);
  */
 int hashtable_debug_number_of_key_copies(hashtable_t *ht, pageid_t pageied);
 
+END_C_DECLS
 #endif /* CONCURRENTHASH_H_ */

@@ -8,6 +8,8 @@
 #define HISTOGRAM_H_
 
 #include <stasis/common.h>
+BEGIN_C_DECLS
+
 #include <assert.h>
 #include <stdio.h>

@@ -182,5 +184,5 @@ void stasis_histogram_pretty_print_32(stasis_histogram_32_t* a) {
   printf(" us ms s ks Ms\n");
   }
 }
-
+END_C_DECLS
 #endif /* HISTOGRAM_H_ */

@@ -1,6 +1,10 @@
 #ifndef STASIS_LHTABLE_H
 #define STASIS_LHTABLE_H
 
+#include <stasis/common.h>
+
+BEGIN_C_DECLS
+
 #ifndef LH_ENTRY
 #define LH_ENTRY(foo) lh##foo
 #endif // LH_ENTRY

@@ -71,4 +75,6 @@ void LH_ENTRY(openlist)(const struct LH_ENTRY(table) * table,
 const struct LH_ENTRY(pair_t)* LH_ENTRY(readlist)(struct LH_ENTRY(list)* list);
 void LH_ENTRY(closelist)(struct LH_ENTRY(list) * list);
 
+END_C_DECLS
+
 #endif // STASIS_LHTABLE_H

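The LH_ENTRY token-pasting macro above prefixes every name, so the same hash-table source can be instantiated under different prefixes. With the default definition shown in the hunk, the expansion is mechanical:

#define LH_ENTRY(foo) lh##foo

/* Token pasting in action: after preprocessing, each declaration
 * below is a plain prefixed identifier. */
struct LH_ENTRY(table);   /* expands to: struct lhtable; */
struct LH_ENTRY(list);    /* expands to: struct lhlist;  */
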
@@ -24,14 +24,16 @@
 /* Header file for redblack.c, should be included by any code that
 ** uses redblack.c since it defines the functions
 */
+#include <stasis/common.h>
 
 /* Stop multiple includes */
 #ifdef STLSEARCH
 #include "stlredblack.h"
 #else
 #ifndef _REDBLACK_H
 
 #ifndef RB_CUSTOMIZE
 
+BEGIN_C_DECLS
 /*
  * Without customization, the data member in the tree nodes is a void
  * pointer, and you need to pass in a comparison function to be

@@ -129,6 +131,9 @@ RB_STATIC void RB_ENTRY(walk)(const struct RB_ENTRY(tree) *,
 RB_STATIC RBLIST *RB_ENTRY(openlist)(const struct RB_ENTRY(tree) *);
 RB_STATIC const RB_ENTRY(data_t) *RB_ENTRY(readlist)(RBLIST *);
 RB_STATIC void RB_ENTRY(closelist)(RBLIST *);
 
+END_C_DECLS
+
 #endif
 
 /* Some useful macros */

@@ -136,6 +141,7 @@ RB_STATIC void RB_ENTRY(closelist)(RBLIST *);
 #define rbmax(rbinfo) RB_ENTRY(lookup)(RB_LULAST, NULL, (rbinfo))
 
 #define _REDBLACK_H
 
 #endif /* _REDBLACK_H */
 
 /*

@@ -18,13 +18,13 @@ lsn_t stasis_ringbuffer_reserve_space(stasis_ringbuffer_t * ring, lsn_t sz, lsn_
 void stasis_ringbuffer_read_done(stasis_ringbuffer_t * ring, lsn_t * handle);
 void stasis_ringbuffer_advance_write_tail(stasis_ringbuffer_t * ring, lsn_t off);
 void stasis_ringbuffer_reading_writer_done(stasis_ringbuffer_t * ring, lsn_t * handle);
-const void * stasis_ringbuffer_nb_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz);
+const byte * stasis_ringbuffer_nb_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz);
 // sz is a pointer to the desired size, or RING_NEXT for "as many bytes as possible"
 lsn_t stasis_ringbuffer_consume_bytes(stasis_ringbuffer_t * ring, lsn_t* sz, lsn_t * handle);
 void stasis_ringbuffer_write_done(stasis_ringbuffer_t * ring, lsn_t * handle);
 // sz is a pointer to the desired size, or RING_NEXT for "as many bytes as possible"
-const void * stasis_ringbuffer_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz);
-void * stasis_ringbuffer_get_wr_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz);
+const byte * stasis_ringbuffer_get_rd_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz);
+byte * stasis_ringbuffer_get_wr_buf(stasis_ringbuffer_t * ring, lsn_t off, lsn_t sz);
 lsn_t stasis_ringbuffer_get_read_tail(stasis_ringbuffer_t * ring);
 lsn_t stasis_ringbuffer_get_write_tail(stasis_ringbuffer_t * ring);
 lsn_t stasis_ringbuffer_get_write_frontier(stasis_ringbuffer_t * ring);

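These signature changes pair with the commit's cast fixes: C++ forbids both implicit conversion from void* to other pointer types and pointer arithmetic on void*, so void* return types force a cast at every g++ call site. Returning byte* (assumed here to be Stasis's unsigned char typedef from stasis/common.h) avoids both. A hypothetical caller, with an assumed header path:

#include <stasis/util/ringbuffer.h>  /* assumed path */

/* Hypothetical caller: with the old void* signature, the assignment
 * below would need an explicit cast under g++, and the arithmetic on
 * the result would be ill-formed in C++ entirely. With byte*, both
 * compile cleanly under gcc and g++. */
static const byte *read_back_half(stasis_ringbuffer_t *ring,
                                  lsn_t off, lsn_t sz) {
  const byte *buf = stasis_ringbuffer_get_rd_buf(ring, off, sz);
  return buf + sz / 2;  /* pointer arithmetic on byte* is well-defined */
}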