more malloc fixes; add stasis_alloc, stasis_malloc_trailing_array
parent 8c2547974a
commit 0153160218
55 changed files with 144 additions and 142 deletions
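The new helpers wrap raw malloc calls so the element type appears once at each call site instead of being repeated inside a sizeof. The macro definitions themselves are not part of this diff; the following is only a minimal sketch of what they presumably expand to, inferred from the call sites below (the real definitions live in Stasis's common headers and may differ, for example by zeroing memory):

    /* Sketch only: assumed definitions, not copied from the Stasis headers. */
    #include <stdlib.h>

    /* Allocate `count` objects of `type`; replaces malloc(count * sizeof(type)). */
    #define stasis_malloc(count, type) ((type*)malloc((count) * sizeof(type)))

    /* Allocate a single object of `type`; replaces malloc(sizeof(type)). */
    #define stasis_alloc(type) stasis_malloc(1, type)

    /* Allocate a `type` followed by `bytes` of trailing variable-length data;
       replaces malloc(sizeof(type) + bytes). */
    #define stasis_malloc_trailing_array(type, bytes) ((type*)malloc(sizeof(type) + (bytes)))

Under that reading, the typical rewrites below are stasis_malloc(1, T) -> stasis_alloc(T), malloc(n) -> stasis_malloc(n, byte), and malloc(sizeof(T) + n) -> stasis_malloc_trailing_array(T, n).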
@@ -100,7 +100,7 @@ int do_operations(pageid_t page_count, int num_threads, unsigned long long num_o
 
 for(int i = 0; i < num_threads ; i++) {
 if(ops_remaining <= 0) { num_threads = i; break; }
-struct thread_arg *a = stasis_malloc(1, struct thread_arg);
+struct thread_arg *a = stasis_alloc(struct thread_arg);
 a->seed = base_seed + i;
 a->num_ops = ops_remaining < ops_per_thread ? ops_remaining : ops_per_thread;
 a->write_frac = write_frac;
@@ -134,7 +134,7 @@ static void void_double_add(void * val, struct rbtree * a, struct rbtree * b) {
 static int availablePages_remove(stasis_allocation_policy_t *ap, pageid_t pageid);
 static int availablePages_add(stasis_allocation_policy_t *ap, pageid_t pageid, size_t freespace) {
 int ret = availablePages_remove(ap, pageid);
-availablePages_pageid_freespace* tup= stasis_malloc(1, availablePages_pageid_freespace);
+availablePages_pageid_freespace* tup= stasis_alloc(availablePages_pageid_freespace);
 tup->pageid = pageid;
 tup->freespace = freespace;
 void_double_add(tup, ap->availablePages_key_pageid, ap->availablePages_key_freespace_pageid);
@@ -151,7 +151,7 @@ static int pageOwners_add(stasis_allocation_policy_t *ap, int xid, size_t freesp
 
 int ret = pageOwners_remove(ap, pageid);
 
-pageOwners_xid_freespace_pageid * tup = stasis_malloc(1, pageOwners_xid_freespace_pageid);
+pageOwners_xid_freespace_pageid * tup = stasis_alloc(pageOwners_xid_freespace_pageid);
 tup->xid = xid;
 tup->freespace = freespace;
 tup->pageid = pageid;
@@ -202,7 +202,7 @@ static int allPages_lookup_by_pageid(stasis_allocation_policy_t *ap, pageid_t pa
 }
 }
 static int allPages_add(stasis_allocation_policy_t *ap, pageid_t pageid, size_t freespace) {
-allPages_pageid_freespace * tup = stasis_malloc(1, allPages_pageid_freespace);
+allPages_pageid_freespace * tup = stasis_alloc(allPages_pageid_freespace);
 tup->pageid = pageid;
 tup->freespace = freespace;
 int ret = void_single_add(tup, ap->allPages_key_pageid);
@@ -230,7 +230,7 @@ static void allPages_removeAll(stasis_allocation_policy_t *ap) {
 }
 
 static void allPages_set_freespace(stasis_allocation_policy_t *ap, pageid_t pageid, size_t freespace) {
-allPages_pageid_freespace * tup = stasis_malloc(1, allPages_pageid_freespace);
+allPages_pageid_freespace * tup = stasis_alloc(allPages_pageid_freespace);
 tup->pageid = pageid;
 tup->freespace = freespace;
 int existed = void_single_add(tup, ap->allPages_key_pageid);
@@ -323,7 +323,7 @@ static int xidAllocedDealloced_helper_remove(stasis_allocation_policy_t *ap, str
 static int xidAllocedDealloced_helper_add(stasis_allocation_policy_t *ap, struct rbtree *first, struct rbtree* second, int xid, pageid_t pageid) {
 int existed = xidAllocedDealloced_helper_remove(ap, first, second, xid, pageid);
 
-xidAllocedDealloced_xid_pageid * tup = stasis_malloc(1, xidAllocedDealloced_xid_pageid);
+xidAllocedDealloced_xid_pageid * tup = stasis_alloc(xidAllocedDealloced_xid_pageid);
 tup->xid = xid;
 tup->pageid = pageid;
 void_double_add(tup, first, second);
@@ -454,7 +454,7 @@ static int xidAllocedDealloced_cmp_xid_pageid(const void *ap, const void *bp, co
 }
 
 stasis_allocation_policy_t * stasis_allocation_policy_init() {
-stasis_allocation_policy_t * ap = stasis_malloc(1, stasis_allocation_policy_t);
+stasis_allocation_policy_t * ap = stasis_alloc(stasis_allocation_policy_t);
 ap->availablePages_key_pageid = rbinit(availablePages_cmp_pageid, 0);
 ap->availablePages_key_freespace_pageid = rbinit(availablePages_cmp_freespace_pageid, 0);
 ap->pageOwners_key_pageid = rbinit(pageOwners_cmp_pageid, 0);
@@ -100,7 +100,7 @@ compensated_function Page * __profile_loadPage(int xid, pageid_t pageid, char *
 int * pins = LH_ENTRY(find)(profile_load_pins_hash, &ret, sizeof(void*));
 
 if(!pins) {
-pins = stasis_malloc(1, int);
+pins = stasis_alloc(int);
 *pins = 0;
 LH_ENTRY(insert)(profile_load_pins_hash, &ret, sizeof(void*), pins);
 }
@@ -506,8 +506,8 @@ static int bhCloseHandleImpl(stasis_buffer_manager_t *bm, stasis_buffer_manager_
 }
 
 stasis_buffer_manager_t* stasis_buffer_manager_hash_open(stasis_page_handle_t * h, stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
-stasis_buffer_manager_t *bm = stasis_malloc(1, stasis_buffer_manager_t);
-stasis_buffer_hash_t *bh = stasis_malloc(1, stasis_buffer_hash_t);
+stasis_buffer_manager_t *bm = stasis_alloc(stasis_buffer_manager_t);
+stasis_buffer_hash_t *bh = stasis_alloc(stasis_buffer_hash_t);
 
 bm->openHandleImpl = bhOpenHandleImpl;
 bm->closeHandleImpl = bhCloseHandleImpl;
@@ -177,7 +177,7 @@ static inline stasis_buffer_concurrent_hash_tls_t * populateTLS(stasis_buffer_ma
 stasis_buffer_concurrent_hash_t *ch = bm->impl;
 stasis_buffer_concurrent_hash_tls_t *tls = pthread_getspecific(ch->key);
 if(tls == NULL) {
-tls = stasis_malloc(1, stasis_buffer_concurrent_hash_tls_t);
+tls = stasis_alloc(stasis_buffer_concurrent_hash_tls_t);
 tls->p = NULL;
 tls->bm = bm;
 pthread_setspecific(ch->key, tls);
@@ -409,8 +409,8 @@ static int chCloseHandle(stasis_buffer_manager_t *bm, stasis_buffer_manager_hand
 }
 
 stasis_buffer_manager_t* stasis_buffer_manager_concurrent_hash_open(stasis_page_handle_t * h, stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
-stasis_buffer_manager_t *bm = stasis_malloc(1, stasis_buffer_manager_t);
-stasis_buffer_concurrent_hash_t *ch = stasis_malloc(1, stasis_buffer_concurrent_hash_t);
+stasis_buffer_manager_t *bm = stasis_alloc(stasis_buffer_manager_t);
+stasis_buffer_concurrent_hash_t *ch = stasis_alloc(stasis_buffer_concurrent_hash_t);
 bm->openHandleImpl = chOpenHandle;
 bm->closeHandleImpl = chCloseHandle;
 bm->loadPageImpl = chLoadPageImpl;
@@ -54,7 +54,7 @@ static int bufManCloseHandle(stasis_buffer_manager_t *bm, stasis_buffer_manager_
 
 stasis_buffer_manager_t* stasis_buffer_manager_deprecated_open(stasis_page_handle_t * ph) {
 page_handle = ph;
-stasis_buffer_manager_t * bm = stasis_malloc(1, stasis_buffer_manager_t);
+stasis_buffer_manager_t * bm = stasis_alloc(stasis_buffer_manager_t);
 bm->releasePageImpl = bufManReleasePage;
 bm->openHandleImpl = bufManOpenHandle;
 bm->closeHandleImpl = bufManCloseHandle;
@@ -130,7 +130,7 @@ static void pfPageWrite(stasis_page_handle_t * h, Page * ret) {
 
 /** @todo O_DIRECT is broken in older linuxes (eg 2.4). The build script should disable it on such platforms. */
 stasis_page_handle_t* openPageFile(stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
-stasis_page_handle_t * ret = stasis_malloc(1, stasis_page_handle_t);
+stasis_page_handle_t * ret = stasis_alloc(stasis_page_handle_t);
 ret->read = pfPageRead;
 ret->write = pfPageWrite;
 ret->force_file = pfForcePageFile;
@@ -23,7 +23,7 @@ static Page * paLoadPage(stasis_buffer_manager_t *bm, stasis_buffer_manager_hand
 }
 
 if(!pa->pageMap[pageid]) {
-pa->pageMap[pageid] = stasis_malloc(1, Page);
+pa->pageMap[pageid] = stasis_alloc(Page);
 pa->pageMap[pageid]->id = pageid;
 pa->pageMap[pageid]->pageType = type == UNKNOWN_TYPE_PAGE ? 0 : type;
 pa->pageMap[pageid]->LSN = 0;
@@ -82,8 +82,8 @@ static int paCloseHandle(stasis_buffer_manager_t *bm, stasis_buffer_manager_hand
 
 stasis_buffer_manager_t * stasis_buffer_manager_mem_array_open () {
 
-stasis_buffer_manager_t * bm = stasis_malloc(1, stasis_buffer_manager_t);
-stasis_buffer_manager_page_array_t * pa = stasis_malloc(1, stasis_buffer_manager_page_array_t);
+stasis_buffer_manager_t * bm = stasis_alloc(stasis_buffer_manager_t);
+stasis_buffer_manager_page_array_t * pa = stasis_alloc(stasis_buffer_manager_page_array_t);
 
 bm->releasePageImpl = paReleasePage;
 bm->openHandleImpl = paOpenHandle;
@@ -64,7 +64,7 @@ struct stasis_buffer_pool_t {
 
 stasis_buffer_pool_t* stasis_buffer_pool_init() {
 
-stasis_buffer_pool_t * ret = stasis_malloc(1, stasis_buffer_pool_t);
+stasis_buffer_pool_t * ret = stasis_alloc(stasis_buffer_pool_t);
 
 ret->nextPage = 0;
 
@@ -58,7 +58,7 @@ int __lladd_pthread_mutex_lock(lladd_pthread_mutex_t *mutex, char * file, int li
 mutex->last_acquired_at = location;
 
 if(!tup) {
-tup = stasis_malloc(1, profile_tuple);
+tup = stasis_alloc(profile_tuple);
 
 init_tuple(tup);
 
@@ -133,7 +133,7 @@ int __lladd_pthread_cond_wait(pthread_cond_t *cond, lladd_pthread_mutex_t *mutex
 mutex->last_acquired_at = location;
 
 if(!tup) {
-tup = stasis_malloc(1, profile_tuple);
+tup = stasis_alloc(profile_tuple);
 
 init_tuple(tup);
 
@@ -167,7 +167,7 @@ int __lladd_pthread_cond_timedwait(pthread_cond_t *cond, lladd_pthread_mutex_t *
 #undef downgradelock
 
 __profile_rwl *__profile_rw_initlock (char * file, int line) {
-__profile_rwl * ret = stasis_malloc(1, __profile_rwl);
+__profile_rwl * ret = stasis_alloc(__profile_rwl);
 
 ret->file = file;
 ret->line = line;
@@ -219,7 +219,7 @@ void __profile_readlock (__profile_rwl *lock, int d, char * file, int line) {
 lock->last_acquired_at = location;
 
 if(!tup) {
-tup = stasis_malloc(1, profile_tuple);
+tup = stasis_alloc(profile_tuple);
 
 init_tuple(tup);
 
@@ -258,7 +258,7 @@ void __profile_writelock (__profile_rwl *lock, int d, char * file, int line) {
 lock->last_acquired_at = location;
 
 if(!tup) {
-tup = stasis_malloc(1, profile_tuple);
+tup = stasis_alloc(profile_tuple);
 
 init_tuple(tup);
 
@@ -55,13 +55,13 @@ void stasis_dirty_page_table_set_dirty(stasis_dirty_page_table_t * dirtyPages, P
 pthread_mutex_lock(&dirtyPages->mutex);
 if(!p->dirty) {
 p->dirty = 1;
-dpt_entry * e = stasis_malloc(1, dpt_entry);
+dpt_entry * e = stasis_alloc(dpt_entry);
 e->p = p->id;
 e->lsn = p->LSN;
 const void * ret = rbsearch(e, dirtyPages->tableByPage);
 assert(ret == e); // otherwise, the entry was already in the table.
 
-e = stasis_malloc(1, dpt_entry);
+e = stasis_alloc(dpt_entry);
 e->p = p->id;
 e->lsn = p->LSN;
 ret = rbsearch(e, dirtyPages->tableByLsnAndPage);
@@ -344,7 +344,7 @@ void stasis_dirty_page_table_set_buffer_manager(stasis_dirty_page_table_t * dpt,
 }
 
 stasis_dirty_page_table_t * stasis_dirty_page_table_init() {
-stasis_dirty_page_table_t * ret = stasis_malloc(1, stasis_dirty_page_table_t);
+stasis_dirty_page_table_t * ret = stasis_alloc(stasis_dirty_page_table_t);
 ret->outstanding_flush_lsns = stasis_util_multiset_create();
 
 ret->tableByPage = rbinit(dpt_cmp_page, 0);
@@ -61,7 +61,7 @@ static stasis_write_buffer_t * debug_write_buffer(stasis_handle_t * h,
 printf("tid=%9ld call write_buffer(%lx, %lld, %lld)\n",
 (long)(intptr_t)pthread_self(), (unsigned long)hh, off, len); fflush(stdout);
 stasis_write_buffer_t * ret = hh->write_buffer(hh,off,len);
-stasis_write_buffer_t * retWrap = stasis_malloc(1, stasis_write_buffer_t);
+stasis_write_buffer_t * retWrap = stasis_alloc(stasis_write_buffer_t);
 *retWrap = *ret;
 retWrap->h = h;
 retWrap->impl = ret;
@@ -85,7 +85,7 @@ static stasis_read_buffer_t * debug_read_buffer(stasis_handle_t * h,
 printf("tid=%9ld call read_buffer(%lx, %lld, %lld)\n",
 (long)(intptr_t)pthread_self(), (unsigned long)hh, off, len); fflush(stdout);
 stasis_read_buffer_t * ret = hh->read_buffer(hh,off,len);
-stasis_read_buffer_t * retWrap = stasis_malloc(1, stasis_read_buffer_t);
+stasis_read_buffer_t * retWrap = stasis_alloc(stasis_read_buffer_t);
 *retWrap = *ret;
 retWrap->h = h;
 retWrap->impl = ret;
@@ -156,9 +156,9 @@ struct stasis_handle_t debug_func = {
 
 
 stasis_handle_t * stasis_handle(open_debug)(stasis_handle_t * h) {
-stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t * ret = stasis_alloc(stasis_handle_t);
 *ret = debug_func;
-ret->impl = stasis_malloc(1, debug_impl);
+ret->impl = stasis_alloc(debug_impl);
 ((debug_impl*)(ret->impl))->h = h;
 return ret;
 }
@@ -245,7 +245,7 @@ static int file_write(stasis_handle_t *h, lsn_t off, const byte * dat, lsn_t len
 static stasis_write_buffer_t * file_write_buffer(stasis_handle_t * h,
 lsn_t off, lsn_t len) {
 // Allocate the handle
-stasis_write_buffer_t * ret = stasis_malloc(1, stasis_write_buffer_t);
+stasis_write_buffer_t * ret = stasis_alloc(stasis_write_buffer_t);
 if(!ret) { return NULL; }
 
 file_impl * impl = (file_impl*)h->impl;
@@ -312,7 +312,7 @@ static int file_release_write_buffer(stasis_write_buffer_t * w) {
 
 static stasis_read_buffer_t * file_read_buffer(stasis_handle_t * h,
 lsn_t off, lsn_t len) {
-stasis_read_buffer_t * ret = stasis_malloc(1, stasis_read_buffer_t);
+stasis_read_buffer_t * ret = stasis_alloc(stasis_read_buffer_t);
 if(!ret) { return NULL; }
 
 byte * buf = stasis_malloc(len, byte);
@@ -484,11 +484,11 @@ struct stasis_handle_t file_func = {
 };
 
 stasis_handle_t * stasis_handle(open_file)(const char * filename, int flags, int mode) {
-stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t * ret = stasis_alloc(stasis_handle_t);
 if(!ret) { return NULL; }
 *ret = file_func;
 
-file_impl * impl = stasis_malloc(1, file_impl);
+file_impl * impl = stasis_alloc(file_impl);
 ret->impl = impl;
 pthread_mutex_init(&(impl->mut), 0);
 assert(sizeof(off_t) >= (64/8));
@@ -45,7 +45,7 @@ static stasis_write_buffer_t * mem_write_buffer(stasis_handle_t * h,
 lsn_t off, lsn_t len) {
 mem_impl* impl = (mem_impl*)(h->impl);
 
-stasis_write_buffer_t * ret = stasis_malloc(1, stasis_write_buffer_t);
+stasis_write_buffer_t * ret = stasis_alloc(stasis_write_buffer_t);
 if(!ret) { return NULL; }
 
 pthread_mutex_lock(&(impl->mut));
@@ -102,7 +102,7 @@ static stasis_read_buffer_t * mem_read_buffer(stasis_handle_t * h,
 mem_impl * impl = (mem_impl*)(h->impl);
 pthread_mutex_lock(&(impl->mut));
 
-stasis_read_buffer_t * ret = stasis_malloc(1, stasis_read_buffer_t);
+stasis_read_buffer_t * ret = stasis_alloc(stasis_read_buffer_t);
 if(!ret) { return NULL; }
 
 if(off < 0 || off + len > impl->end_pos) {
@@ -183,15 +183,15 @@ struct stasis_handle_t mem_func = {
 };
 
 stasis_handle_t * stasis_handle(open_memory)() {
-stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t * ret = stasis_alloc(stasis_handle_t);
 if(!ret) { return NULL; }
 *ret = mem_func;
 
-mem_impl * impl = stasis_malloc(1, mem_impl);
+mem_impl * impl = stasis_alloc(mem_impl);
 ret->impl = impl;
 pthread_mutex_init(&(impl->mut), 0);
 impl->end_pos = 0;
-impl->buf = malloc(0);
+impl->buf = stasis_malloc(0, byte);
 impl->refcount = 1;
 
 return ret;
@@ -66,7 +66,7 @@
 static inline stasis_read_buffer_t* alloc_read_buffer_error(stasis_handle_t *h,
 int error) {
 assert(error);
-stasis_read_buffer_t * r = stasis_malloc(1, stasis_read_buffer_t);
+stasis_read_buffer_t * r = stasis_alloc(stasis_read_buffer_t);
 r->h = h;
 r->buf = 0;
 r->len = 0;
@@ -78,7 +78,7 @@ static inline stasis_read_buffer_t* alloc_read_buffer_error(stasis_handle_t *h,
 static inline stasis_write_buffer_t* alloc_write_buffer_error
 (stasis_handle_t *h, int error) {
 assert(error);
-stasis_write_buffer_t * w = stasis_malloc(1, stasis_write_buffer_t);
+stasis_write_buffer_t * w = stasis_alloc(stasis_write_buffer_t);
 w->h = h;
 w->off = 0;
 w->buf = 0;
@@ -214,7 +214,7 @@ static void releaseSlowHandle(nbw_impl * impl, stasis_handle_t * slow) {
 }
 
 static tree_node * allocTreeNode(lsn_t off, lsn_t len) {
-tree_node * ret = stasis_malloc(1, tree_node);
+tree_node * ret = stasis_alloc(tree_node);
 ret->start_pos = off;
 ret->end_pos = off + len;
 ret->dirty = CLEAN;
@@ -408,11 +408,11 @@ static stasis_write_buffer_t * nbw_write_buffer(stasis_handle_t * h,
 const tree_node * n = allocFastHandle(impl, off, len);
 stasis_write_buffer_t * w = n->h->write_buffer(n->h, off, len);
 
-write_buffer_impl * w_impl = stasis_malloc(1, write_buffer_impl);
+write_buffer_impl * w_impl = stasis_alloc(write_buffer_impl);
 w_impl->n = n;
 w_impl->w = w;
 
-stasis_write_buffer_t * ret = stasis_malloc(1, stasis_write_buffer_t);
+stasis_write_buffer_t * ret = stasis_alloc(stasis_write_buffer_t);
 ret->h = h;
 ret->off = w->off;
 ret->len = w->len;
@@ -454,11 +454,11 @@ static stasis_read_buffer_t * nbw_read_buffer(stasis_handle_t * h,
 stasis_handle_t * r_h = n ? n->h : getSlowHandle(impl);
 r = r_h->read_buffer(r_h, off, len);
 
-read_buffer_impl * r_impl = stasis_malloc(1, read_buffer_impl);
+read_buffer_impl * r_impl = stasis_alloc(read_buffer_impl);
 r_impl->n = n;
 r_impl->r = r;
 
-stasis_read_buffer_t * ret = stasis_malloc(1, stasis_read_buffer_t);
+stasis_read_buffer_t * ret = stasis_alloc(stasis_read_buffer_t);
 ret->h = h;
 ret->off = r->off;
 ret->len = r->len;
@@ -665,7 +665,7 @@ static void * nbw_worker(void * handle) {
 memcpy(buf, r->buf, r->len);
 buf_off += r->len;
 
-dummies = stasis_malloc(1, tree_node);
+dummies = stasis_alloc(tree_node);
 dummies[0] = dummy;
 dummy_count = 1;
 first = 0;
@@ -774,7 +774,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)
 stasis_handle_t * (*fast_factory)(lsn_t, lsn_t, void *),
 void * fast_factory_arg, int worker_thread_count, lsn_t buffer_size,
 int max_fast_handles) {
-nbw_impl * impl = stasis_malloc(1, nbw_impl);
+nbw_impl * impl = stasis_alloc(nbw_impl);
 pthread_mutex_init(&impl->mut, 0);
 
 impl->end_pos = 0;
@@ -787,7 +787,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)
 
 impl->available_slow_handles = 0;
 impl->available_slow_handle_count = 0;
-impl->all_slow_handles = stasis_malloc(1, stasis_handle_t*);
+impl->all_slow_handles = stasis_alloc(stasis_handle_t*);
 impl->all_slow_handle_count = 0;
 
 impl->requested_bytes_written = 0;
@@ -812,7 +812,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)
 impl->still_open = 1;
 impl->refcount = 1;
 
-stasis_handle_t *h = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t *h = stasis_alloc(stasis_handle_t);
 *h = nbw_func;
 h->impl = impl;
 
@@ -855,7 +855,7 @@ static stasis_handle_t * slow_pfile_factory(void * argsP) {
 return h;
 }
 static int nop_close(stasis_handle_t*h) { return 0; }
-struct sf_args * slow_arg = stasis_malloc(1, struct sf_args);
+struct sf_args * slow_arg = stasis_alloc(struct sf_args);
 slow_arg->filename = path;
 
 slow_arg->openMode = openMode;
@@ -198,7 +198,7 @@ static int pfile_write(stasis_handle_t *h, lsn_t off, const byte *dat,
 
 static stasis_write_buffer_t * pfile_write_buffer(stasis_handle_t *h,
 lsn_t off, lsn_t len) {
-stasis_write_buffer_t *ret = stasis_malloc(1, stasis_write_buffer_t);
+stasis_write_buffer_t *ret = stasis_alloc(stasis_write_buffer_t);
 
 if (!ret) {
 h->error = ENOMEM;
@@ -249,7 +249,7 @@ static int pfile_release_write_buffer(stasis_write_buffer_t *w) {
 
 static stasis_read_buffer_t *pfile_read_buffer(stasis_handle_t *h,
 lsn_t off, lsn_t len) {
-stasis_read_buffer_t *ret = stasis_malloc(1, stasis_read_buffer_t);
+stasis_read_buffer_t *ret = stasis_alloc(stasis_read_buffer_t);
 if (!ret) { return NULL; }
 
 byte *buf = stasis_malloc(len,byte);
@@ -417,11 +417,11 @@ static struct stasis_handle_t pfile_func = {
 
 stasis_handle_t *stasis_handle(open_pfile)(const char *filename,
 int flags, int mode) {
-stasis_handle_t *ret = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t *ret = stasis_alloc(stasis_handle_t);
 if (!ret) { return NULL; }
 *ret = pfile_func;
 
-pfile_impl *impl = stasis_malloc(1, pfile_impl);
+pfile_impl *impl = stasis_alloc(pfile_impl);
 if (!impl) { free(ret); return NULL; }
 
 ret->impl = impl;
@@ -191,9 +191,9 @@ struct stasis_handle_t raid0_func = {
 };
 
 stasis_handle_t * stasis_handle_open_raid0(int handle_count, stasis_handle_t** h, uint32_t stripe_size) {
-stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t * ret = stasis_alloc(stasis_handle_t);
 *ret = raid0_func;
-raid0_impl * r = stasis_malloc(1, raid0_impl);
+raid0_impl * r = stasis_alloc(raid0_impl);
 r->stripe_size = stripe_size;
 r->handle_count = handle_count;
 r->h = stasis_malloc(handle_count, stasis_handle_t*);
@@ -149,9 +149,9 @@ struct stasis_handle_t raid1_func = {
 };
 
 stasis_handle_t * stasis_handle_open_raid1(stasis_handle_t* a, stasis_handle_t* b) {
-stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
+stasis_handle_t * ret = stasis_alloc(stasis_handle_t);
 *ret = raid1_func;
-raid1_impl * i = stasis_malloc(1, raid1_impl);
+raid1_impl * i = stasis_alloc(raid1_impl);
 i->a = a; i->b = b;
 ret->impl = i;
 return ret;
@@ -24,7 +24,7 @@ static int cmp_transition(const void * a, const void * b, const void * arg) {
 }
 
 rangeTracker * rangeTrackerInit(int quantization) {
-rangeTracker * ret = stasis_malloc(1, rangeTracker);
+rangeTracker * ret = stasis_alloc(rangeTracker);
 ret->ranges = RB_ENTRY(init)(cmp_transition, 0);
 ret->quantization = quantization;
 return ret;
@@ -64,7 +64,7 @@ static void rangeTrackerDelta(rangeTracker * rt, const range * r, int delta) {
 assert(t->pins + t->delta >= 0);
 if(t->pos != r->start) {
 int newpins = t->pins + t->delta;
-t = stasis_malloc(1, transition);
+t = stasis_alloc(transition);
 t->pos = r->start;
 t->delta = delta;
 t->pins = newpins;
@@ -84,7 +84,7 @@ static void rangeTrackerDelta(rangeTracker * rt, const range * r, int delta) {
 }
 }
 } else {
-t = stasis_malloc(1, transition);
+t = stasis_alloc(transition);
 t->pos = r->start;
 t->delta = delta;
 t->pins = 0;
@@ -111,7 +111,7 @@ static void rangeTrackerDelta(rangeTracker * rt, const range * r, int delta) {
 }
 if(!t || t->pos != r->stop) {
 // Need to allocate new transition
-t = stasis_malloc(1, transition);
+t = stasis_alloc(transition);
 t->pos = r->stop;
 t->delta = 0-delta;
 t->pins = curpin;
@@ -167,7 +167,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
 assert(!t->pins);
 assert(t->delta);
 assert(! ret[next_range] );
-ret[next_range] = stasis_malloc(1, range);
+ret[next_range] = stasis_alloc(range);
 ret[next_range]->start = t->pos;
 in_range = 1;
 }
@@ -175,7 +175,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
 if(t->pins + t->delta) {
 if(!in_range) {
 assert(! ret[next_range]);
-ret[next_range] = stasis_malloc(1, range);
+ret[next_range] = stasis_alloc(range);
 ret[next_range]->start = t->pos;
 in_range = 1;
 }
@@ -352,7 +352,7 @@ LogEntry * stasis_log_file_pool_reserve_entry(stasis_log_t * log, size_t szs) {
 uint32_t sz = szs;
 stasis_log_file_pool_state * fp = log->impl;
 lsn_t * handle = pthread_getspecific(fp->handle_key);
-if(!handle) { handle = stasis_malloc(1, lsn_t); pthread_setspecific(fp->handle_key, handle); }
+if(!handle) { handle = stasis_alloc(lsn_t); pthread_setspecific(fp->handle_key, handle); }
 
 uint64_t framed_size = sz+sizeof(uint32_t)+sizeof(uint32_t);
 lsn_t off = stasis_ringbuffer_reserve_space(fp->ring, framed_size, handle);
@@ -731,8 +731,8 @@ int filesort(const void * ap, const void * bp) {
 */
 stasis_log_t* stasis_log_file_pool_open(const char* dirname, int filemode, int fileperm) {
 struct dirent **namelist;
-stasis_log_file_pool_state* fp = stasis_malloc(1, stasis_log_file_pool_state);
-stasis_log_t * ret = stasis_malloc(1, stasis_log_t);
+stasis_log_file_pool_state* fp = stasis_alloc(stasis_log_file_pool_state);
+stasis_log_t * ret = stasis_alloc(stasis_log_t);
 
 static const stasis_log_t proto = {
 stasis_log_file_pool_set_truncation,
@@ -27,7 +27,7 @@ stasis_log_group_force_t * stasis_log_group_force_init(stasis_log_t * log, uint6
 "times > 1 second. (%llu second wait time requested)\n",
 (long long unsigned int) (wait_nsec / (1000 * 1000 * 1000)));
 }
-stasis_log_group_force_t * ret = stasis_malloc(1, stasis_log_group_force_t);
+stasis_log_group_force_t * ret = stasis_alloc(stasis_log_group_force_t);
 ret->log = log;
 pthread_mutex_init(&ret->check_commit,0);
 pthread_cond_init(&ret->tooFewXacts,0);
@@ -189,7 +189,7 @@ static void stasis_log_impl_in_memory_set_truncation(stasis_log_t *log, stasis_t
 }
 
 stasis_log_t* stasis_log_impl_in_memory_open() {
-stasis_log_impl_in_memory * impl = stasis_malloc(1, stasis_log_impl_in_memory);
+stasis_log_impl_in_memory * impl = stasis_alloc(stasis_log_impl_in_memory);
 impl->flushedLSN_lock = initlock();
 impl->globalOffset_lock = initlock();
 impl->globalOffset = 0;
@@ -221,7 +221,7 @@ stasis_log_t* stasis_log_impl_in_memory_open() {
 stasis_log_impl_in_memory_close,
 stasis_log_impl_in_memory_is_durable
 };
-stasis_log_t* log = stasis_malloc(1, stasis_log_t);
+stasis_log_t* log = stasis_alloc(stasis_log_t);
 memcpy(log,&proto, sizeof(proto));
 log->impl = impl;
 return log;
@@ -68,7 +68,7 @@ LogHandle* getLogHandle(stasis_log_t* log) {
 }
 
 LogHandle* getLSNHandle(stasis_log_t * log, lsn_t lsn) {
-LogHandle* ret = stasis_malloc(1, LogHandle);
+LogHandle* ret = stasis_alloc(LogHandle);
 ret->next_offset = lsn;
 ret->prev_offset = lsn;
 ret->last = 0;
@@ -86,7 +86,7 @@ stasis_log_reordering_handle_open(stasis_transaction_table_entry_t * l,
 size_t chunk_len,
 size_t max_len,
 size_t max_size) {
-stasis_log_reordering_handle_t * ret = stasis_malloc(1, stasis_log_reordering_handle_t);
+stasis_log_reordering_handle_t * ret = stasis_alloc(stasis_log_reordering_handle_t);
 
 ret->l = l;
 ret->log = log;
@@ -94,7 +94,7 @@ stasis_log_reordering_handle_open(stasis_transaction_table_entry_t * l,
 pthread_cond_init(&ret->done,0);
 pthread_cond_init(&ret->ready,0);
 ret->closed = 0;
-ret->queue = malloc(sizeof(stasis_log_reordering_op_t)*max_len);
+ret->queue = stasis_malloc(max_len, stasis_log_reordering_op_t);
 ret->chunk_len = chunk_len;
 ret->max_len = max_len;
 ret->cur_off = 0;
@@ -840,7 +840,7 @@ stasis_log_t* stasis_log_safe_writes_open(const char * filename,
 isDurable_LogWriter, // is_durable
 };
 
-stasis_log_safe_writes_state * sw = stasis_malloc(1, stasis_log_safe_writes_state);
+stasis_log_safe_writes_state * sw = stasis_alloc(stasis_log_safe_writes_state);
 sw->filename = strdup(filename);
 {
 char * log_scratch_filename = malloc(strlen(sw->filename) + 2);
@@ -852,7 +852,7 @@ stasis_log_t* stasis_log_safe_writes_open(const char * filename,
 sw->fileperm = fileperm;
 sw->softcommit = softcommit;
 
-stasis_log_t* log = stasis_malloc(1, stasis_log_t);
+stasis_log_t* log = stasis_alloc(stasis_log_t);
 memcpy(log,&proto, sizeof(proto));
 log->impl = sw;
 
@@ -212,7 +212,7 @@ int stasis_alloc_callback(int xid, void * arg) {
 }
 
 stasis_alloc_t* stasis_alloc_init(stasis_transaction_table_t * tbl, stasis_allocation_policy_t * allocPolicy) {
-stasis_alloc_t * alloc = stasis_malloc(1, stasis_alloc_t);
+stasis_alloc_t * alloc = stasis_alloc(stasis_alloc_t);
 alloc->lastFreepage = PAGEID_T_MAX;
 alloc->allocPolicy = allocPolicy;
 pthread_mutex_init(&alloc->mut, 0);
@@ -425,7 +425,7 @@ void Tdealloc(int xid, recordid rid) {
 
 if(type == NORMAL_SLOT) { type = size; }
 
-byte * preimage = malloc(sizeof(alloc_arg)+size);
+byte * preimage = (byte*)stasis_malloc_trailing_array(alloc_arg, size);
 
 ((alloc_arg*)preimage)->slot = rid.slot;
 ((alloc_arg*)preimage)->type = type;
@@ -66,7 +66,7 @@ static int cmp_slot(int xid, Page *p, slotid_t slot, byte * key, size_t keySize,
 recordid rid = {p->id, slot, 0};
 rid.size = stasis_record_length_read(xid, p, rid);
 if(rid.size == INVALID_SLOT) { return 1; } // treat invalid slots as infinity.
-byte * cur = malloc(rid.size);
+byte * cur = stasis_malloc(rid.size, byte);
 stasis_record_read(xid, p, rid, cur);
 byte * cur_ptr;
 size_t cur_len;
@@ -149,7 +149,7 @@ int TbtreeLookup(int xid, recordid rid, void * cmp_arg, byte * key, size_t keySi
 btree_leaf_pair * buf = malloc(slotrid.size);
 stasis_record_read(xid, p, slotrid, (byte*)buf);
 *valueSize = slotrid.size - (buf->keylen + sizeof(btree_leaf_pair));
-*value = malloc(*valueSize);
+*value = stasis_malloc(*valueSize, byte);
 memcpy(*value, ((byte*)(buf+1))+buf->keylen, *valueSize);
 unlock(p->rwlatch);
 releasePage(p);
@@ -456,9 +456,9 @@ typedef struct {
 } lladd_linearHashNTA_generic_it;
 
 lladdIterator_t * ThashGenericIterator(int xid, recordid hash) {
-lladdIterator_t * ret = stasis_malloc(1, lladdIterator_t);
+lladdIterator_t * ret = stasis_alloc(lladdIterator_t);
 ret->type = LINEAR_HASH_NTA_ITERATOR;
-ret->impl = stasis_malloc(1, lladd_linearHashNTA_generic_it);
+ret->impl = stasis_alloc(lladd_linearHashNTA_generic_it);
 
 ((lladd_linearHashNTA_generic_it*)(ret->impl))->hit = ThashIterator(xid, hash, -1, -1);
 ((lladd_linearHashNTA_generic_it*)(ret->impl))->lastKey = NULL;
@@ -104,7 +104,7 @@ int TlinkedListInsert(int xid, recordid list, const byte * key, int keySize, con
 ret = TlinkedListRemove(xid, list, key, keySize);
 } end_ret(compensation_error()); */
 
-stasis_linked_list_insert_log * undoLog = malloc(sizeof(stasis_linked_list_insert_log) + keySize);
+stasis_linked_list_insert_log * undoLog = stasis_malloc_trailing_array(stasis_linked_list_insert_log, keySize);
 
 undoLog->list = list;
 undoLog->keySize = keySize;
@@ -144,7 +144,7 @@ stasis_operation_impl stasis_op_impl_linked_list_remove() {
 return o;
 }
 static void stasis_linked_list_insert_helper(int xid, recordid list, const byte * key, int keySize, const byte * value, int valueSize) {
-stasis_linkedList_entry * entry = malloc(sizeof(stasis_linkedList_entry) + keySize + valueSize);
+stasis_linkedList_entry * entry = stasis_malloc_trailing_array(stasis_linkedList_entry, keySize + valueSize);
 
 Tread(xid, list, entry);
 if(!entry->next.size) {
@@ -155,7 +155,7 @@ static void stasis_linked_list_insert_helper(int xid, recordid list, const byte
 entry->next.size = -1;
 Tset(xid, list, entry);
 } else {
-stasis_linkedList_entry * newEntry = malloc(sizeof(stasis_linkedList_entry) + keySize + valueSize);
+stasis_linkedList_entry * newEntry = stasis_malloc_trailing_array(stasis_linkedList_entry, keySize + valueSize);
 memcpy(newEntry + 1, key, keySize);
 memcpy(((byte*)(newEntry+1))+keySize, value, valueSize);
 newEntry->next = entry->next;
@@ -189,7 +189,7 @@ int TlinkedListFind(int xid, recordid list, const byte * key, int keySize, byte
 if(!memcmp(entry + 1, key, keySize)) {
 // Bucket contains the entry of interest.
 int valueSize = list.size - (sizeof(stasis_linkedList_entry) + keySize);
-*value = malloc(valueSize);
+*value = stasis_malloc(valueSize, byte);
 memcpy(*value, ((byte*)(entry+1))+keySize, valueSize);
 done = 1;
 ret = valueSize;
@@ -356,7 +356,7 @@ void TlinkedListDelete(int xid, recordid list) {
 }
 
 stasis_linkedList_iterator * TlinkedListIterator(int xid, recordid list, int keySize, int valueSize) {
-stasis_linkedList_iterator * it = malloc(sizeof(stasis_linkedList_iterator));
+stasis_linkedList_iterator * it = stasis_alloc(stasis_linkedList_iterator);
 it->keySize = keySize;
 it->valueSize = valueSize;
 it->next = list;
@@ -411,8 +411,8 @@ int TlinkedListNext(int xid, stasis_linkedList_iterator * it, byte ** key, int *
 if(entry->next.size) {
 *keySize = it->keySize;
 *valueSize = it->valueSize;
-*key = malloc(*keySize);
-*value = malloc(*valueSize);
+*key = stasis_malloc(*keySize, byte);
+*value = stasis_malloc(*valueSize, byte);
 
 it->next = entry->next;
 
@@ -44,7 +44,7 @@ static int findInBucket(int xid, recordid hashRid, int bucket_number, const void
 static int findInBucket(int xid, recordid hashRid, int bucket_number, const void * key, int keySize, void * val, int valSize) {
 int found;
 
-hashEntry * e = malloc(sizeof(hashEntry) + keySize + valSize);
+hashEntry * e = stasis_malloc_trailing_array(hashEntry, keySize + valSize);
 
 recordid nextEntry;
 
@@ -306,8 +306,8 @@ static int deleteFromBucket(int xid, recordid hash, int bucket_number, hashEntry
 
 if(bucket_contents->next.size == -1) { return 0; }
 
-hashEntry * A = malloc(sizeof(hashEntry) + keySize + valSize);
-hashEntry * B = malloc(sizeof(hashEntry) + keySize + valSize);
+hashEntry * A = stasis_malloc_trailing_array(hashEntry, keySize + valSize);
+hashEntry * B = stasis_malloc_trailing_array(hashEntry, keySize + valSize);
 
 recordid Aaddr, Baddr;
 
@@ -432,7 +432,7 @@ int TnaiveHashDelete(int xid, recordid hashRid,
 recordid deleteMe;
 hashRid.slot = bucket_number;
 
-hashEntry * bucket_contents = (hashEntry*)malloc(sizeof(hashEntry) + keySize + valSize);
+hashEntry * bucket_contents = stasis_malloc_trailing_array(hashEntry, keySize + valSize);
 assert(hashRid.size == sizeof(hashEntry) + keySize + valSize);
 Tread(xid, hashRid, bucket_contents);
 hashRid.slot = 0;
@@ -448,7 +448,7 @@ int TnaiveHashDelete(int xid, recordid hashRid,
 }
 
 int TnaiveHashOpen(int xid, recordid hashRid, int keySize, int valSize) {
-recordid * headerRidB = (recordid*)malloc(sizeof(recordid) + keySize + valSize);
+recordid * headerRidB = stasis_malloc_trailing_array(recordid, keySize + valSize);
 hashRid.slot = 1;
 Tread(xid, hashRid, headerRidB);
 
@@ -183,7 +183,7 @@ lladd_pagedList_iterator * TpagedListIterator(int xid, recordid list) {
 assert(list.size == sizeof(pagedListHeader));
 Tread(xid, list, &header);
 
-lladd_pagedList_iterator * it = stasis_malloc(1, lladd_pagedList_iterator);
+lladd_pagedList_iterator * it = stasis_alloc(lladd_pagedList_iterator);
 
 it->headerRid = header.nextPage;
 it->entryRid = list;
@@ -381,10 +381,10 @@ block_t genericBlock = {
 };
 
 block_t* stasis_block_first_default_impl(int xid, Page * p) {
-block_t* ret = stasis_malloc(1, block_t);
+block_t* ret = stasis_alloc(block_t);
 *ret = genericBlock;
 genericBlockImpl impl = { p, NULLRID };
-ret->impl = stasis_malloc(1, genericBlockImpl);
+ret->impl = stasis_alloc(genericBlockImpl);
 *(genericBlockImpl*)(ret->impl) = impl;
 return ret;
 }
@@ -81,7 +81,7 @@ static void phClose(stasis_page_handle_t * ph) {
 free(ph);
 }
 static stasis_page_handle_t * phDup(stasis_page_handle_t * ph, int is_sequential) {
-stasis_page_handle_t * ret = stasis_malloc(1, stasis_page_handle_t);
+stasis_page_handle_t * ret = stasis_alloc(stasis_page_handle_t);
 memcpy(ret, ph, sizeof(*ret));
 ret->impl = ((stasis_handle_t*)ret->impl)->dup(ret->impl);
 if(((stasis_handle_t*)ret->impl)->error != 0) {
@@ -97,7 +97,7 @@ static stasis_page_handle_t * phDup(stasis_page_handle_t * ph, int is_sequential
 stasis_page_handle_t * stasis_page_handle_open(stasis_handle_t * handle,
 stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
 DEBUG("Using pageHandle implementation\n");
-stasis_page_handle_t * ret = stasis_malloc(1, stasis_page_handle_t);
+stasis_page_handle_t * ret = stasis_alloc(stasis_page_handle_t);
 ret->write = phWrite;
 ret->read = phRead;
 ret->prefetch_range = phPrefetchRange;
@@ -67,7 +67,7 @@ static void stasis_recovery_analysis(stasis_log_t* log, stasis_transaction_table
 */
 
 if(xactLSN == NULL) {
-xactLSN = stasis_malloc(1, lsn_t);
+xactLSN = stasis_alloc(lsn_t);
 lhinsert(transactionLSN, &(e->xid), sizeof(int), xactLSN);
 } else {
 /* We've seen this xact before, and must have put a value in
@@ -115,8 +115,8 @@ static void clockInsert (struct replacementPolicy* impl, Page* page) {
 }
 
 replacementPolicy* replacementPolicyClockInit(Page * pageArray, int page_count) {
-replacementPolicy *ret = stasis_malloc(1, replacementPolicy);
-stasis_replacement_policy_clock_t * clock = stasis_malloc(1, stasis_replacement_policy_clock_t);
+replacementPolicy *ret = stasis_alloc(replacementPolicy);
+stasis_replacement_policy_clock_t * clock = stasis_alloc(stasis_replacement_policy_clock_t);
 clock->pages = pageArray;
 clock->page_count = page_count;
 clock->ptr = 0;
@@ -144,8 +144,8 @@ static void cwInsert (struct replacementPolicy* impl, Page* page) {
 }
 
 replacementPolicy* replacementPolicyConcurrentWrapperInit(replacementPolicy** rp, int count) {
-replacementPolicy *ret = stasis_malloc(1, replacementPolicy);
-stasis_replacement_policy_concurrent_wrapper_t * rpw = stasis_malloc(1, stasis_replacement_policy_concurrent_wrapper_t);
+replacementPolicy *ret = stasis_alloc(replacementPolicy);
+stasis_replacement_policy_concurrent_wrapper_t * rpw = stasis_alloc(stasis_replacement_policy_concurrent_wrapper_t);
 
 if(stasis_replacement_policy_concurrent_wrapper_power_of_two_buckets) {
 // ensure that count is a power of two.
@@ -77,7 +77,7 @@ static Page* stasis_replacement_policy_lru_get_stale_and_remove(replacementPolic
 
 static void stasis_replacement_policy_lru_insert(replacementPolicy* r, Page* p) {
 stasis_replacement_policy_lru_t * l = r->impl;
-stasis_replacement_policy_lru_entry * e = stasis_malloc(1, stasis_replacement_policy_lru_entry);
+stasis_replacement_policy_lru_entry * e = stasis_alloc(stasis_replacement_policy_lru_entry);
 e->value = p;
 e->clock = l->now;
 l->now++;
@@ -87,8 +87,8 @@ static void stasis_replacement_policy_lru_insert(replacementPolicy* r, Page* p)
 }
 
 replacementPolicy * stasis_replacement_policy_lru_init() {
-replacementPolicy * ret = stasis_malloc(1, replacementPolicy);
-stasis_replacement_policy_lru_t * l = stasis_malloc(1, stasis_replacement_policy_lru_t);
+replacementPolicy * ret = stasis_alloc(replacementPolicy);
+stasis_replacement_policy_lru_t * l = stasis_alloc(stasis_replacement_policy_lru_t);
 l->now = 0;
 l->hash = LH_ENTRY(create)(10);
 l->lru = RB_ENTRY(init)(stasis_replacement_policy_lru_entry_cmp, 0);
@@ -110,14 +110,14 @@ static void stasis_lru_fast_deinit(struct replacementPolicy * r) {
 free(r);
 }
 replacementPolicy * lruFastInit() {
-struct replacementPolicy * ret = stasis_malloc(1, struct replacementPolicy);
+struct replacementPolicy * ret = stasis_alloc(struct replacementPolicy);
 ret->deinit = stasis_lru_fast_deinit;
 ret->hit = stasis_lru_fast_hit;
 ret->getStale = stasis_lru_fast_getStale;
 ret->remove = stasis_lru_fast_remove;
 ret->getStaleAndRemove = stasis_lru_fast_getStaleAndRemove;
 ret->insert = stasis_lru_fast_insert;
-lruFast * l = stasis_malloc(1, lruFast);
+lruFast * l = stasis_alloc(lruFast);
 llInit(&l->list);
 ret->impl = l;
 return ret;
@@ -52,8 +52,8 @@ static void tsInsert (struct replacementPolicy* impl, Page* page) {
 }
 
 replacementPolicy* replacementPolicyThreadsafeWrapperInit(replacementPolicy* rp) {
-replacementPolicy *ret = stasis_malloc(1, replacementPolicy);
-stasis_replacement_policy_threadsafe_wrapper_t * rpw = stasis_malloc(1, stasis_replacement_policy_threadsafe_wrapper_t);
+replacementPolicy *ret = stasis_alloc(replacementPolicy);
+stasis_replacement_policy_threadsafe_wrapper_t * rpw = stasis_alloc(stasis_replacement_policy_threadsafe_wrapper_t);
 rpw->impl = rp;
 pthread_mutex_init(&rpw->mut,0);
 ret->init = NULL;
@@ -172,7 +172,7 @@ int stasis_transaction_table_set_argument(stasis_transaction_table_t *tbl, int x
 }
 
 int* stasis_transaction_table_list_active(stasis_transaction_table_t *tbl, int *count) {
-int * ret = stasis_malloc(1, int);
+int * ret = stasis_alloc(int);
 ret[0] = INVALID_XID;
 *count = 0;
 for(int i = 0; i < MAX_TRANSACTIONS; i++) {
@@ -188,7 +188,7 @@ int* stasis_transaction_table_list_active(stasis_transaction_table_t *tbl, int *
 }
 
 stasis_transaction_table_t * stasis_transaction_table_init() {
-stasis_transaction_table_t * tbl = stasis_malloc(1, stasis_transaction_table_t);
+stasis_transaction_table_t * tbl = stasis_alloc(stasis_transaction_table_t);
 tbl->active_count = 0;
 
 #ifndef HAVE_GCC_ATOMICS
@@ -307,7 +307,7 @@ stasis_transaction_table_entry_t * stasis_transaction_table_begin(stasis_transac
 struct stasis_transaction_table_thread_local_state_t * tls = pthread_getspecific(tbl->key);
 
 if(tls == NULL) {
-tls = stasis_malloc(1, struct stasis_transaction_table_thread_local_state_t);
+tls = stasis_alloc(struct stasis_transaction_table_thread_local_state_t);
 tls->last_entry = 0;
 tls->num_entries = 0;
 tls->entries = NULL;
@@ -27,7 +27,7 @@ struct stasis_truncation_t {
 #endif
 stasis_truncation_t * stasis_truncation_init(stasis_dirty_page_table_t * dpt, stasis_transaction_table_t * tbl,
 stasis_buffer_manager_t *buffer_manager, stasis_log_t *log) {
-stasis_truncation_t * ret = stasis_malloc(1, stasis_truncation_t);
+stasis_truncation_t * ret = stasis_alloc(stasis_truncation_t);
 ret->initialized = 1;
 ret->automaticallyTruncating = 0;
 pthread_mutex_init(&ret->shutdown_mutex, 0);
@@ -78,7 +78,7 @@ stasis_bloom_filter_t * stasis_bloom_filter_create(uint64_t(*func_a)(const char*
 uint64_t(*func_b)(const char*,int),
 uint64_t num_expected_items,
 double false_positive_rate) {
-stasis_bloom_filter_t * ret = stasis_malloc(1, stasis_bloom_filter_t);
+stasis_bloom_filter_t * ret = stasis_alloc(stasis_bloom_filter_t);
 ret->func_a = func_a;
 ret->func_b = func_b;
 ret->num_expected_items = num_expected_items;
@@ -422,7 +422,7 @@ hashtable_t * hashtable_init(pageid_t size) {
 size /= 2;
 newsize *= 2;
 }
-hashtable_t *ht = stasis_malloc(1, hashtable_t);
+hashtable_t *ht = stasis_alloc(hashtable_t);
 
 ht->maxbucketid = (newsize) - 1;
 ht->buckets = calloc(ht->maxbucketid+1, sizeof(bucket_t));
@@ -15,15 +15,15 @@ static inline struct LL_ENTRY(node_t)* LL_ENTRY(shiftNode) (struct LL_ENTRY(list
 static inline void LL_ENTRY(removeNode)(list * l, node_t * n);
 
 list * LL_ENTRY(create)(node_t*(*getNode)(value_t*v,void*conf), void(*setNode)(value_t*v,node_t*n,void*conf),void*conf) {
-list* ret = stasis_malloc(1, list);
+list* ret = stasis_alloc(list);
 
 // bypass const annotation on head, tail...
 list tmp = {
 getNode,
 setNode,
 conf,
-stasis_malloc(1,node_t),
-stasis_malloc(1,node_t)
+stasis_alloc(node_t),
+stasis_alloc(node_t)
 };
 memcpy(ret, &tmp, sizeof(list));
 
@@ -44,7 +44,7 @@ void LL_ENTRY(destroy)(list* l) {
 free(l);
 }
 int LL_ENTRY(push)(list* l, value_t * v) {
-node_t * n = stasis_malloc(1, node_t);
+node_t * n = stasis_alloc(node_t);
 if(!n) { return ENOMEM; }
 n->v = v;
 assert(l->getNode(v, l->conf) == 0);
@@ -65,7 +65,7 @@ value_t* LL_ENTRY(pop) (list* l) {
 }
 }
 int LL_ENTRY(unshift)(list* l, value_t * v) {
-node_t * n = stasis_malloc(1, node_t);
+node_t * n = stasis_alloc(node_t);
 if(!n) { return ENOMEM; }
 n->v = v;
 assert(l->getNode(v, l->conf) == 0);
@@ -135,7 +135,7 @@ static struct LH_ENTRY(pair_t)* insertIntoLinkedList(struct LH_ENTRY(table) * t
 thePair->value = value;
 } else {
 // the bucket isn't empty.
-thePair = stasis_malloc(1, struct LH_ENTRY(pair_t));
+thePair = stasis_alloc(struct LH_ENTRY(pair_t));
 thePair->key = (const LH_ENTRY(key_t) *)malloc(len);
 memcpy((void*)thePair->key, key, len);
 thePair->keyLength = len;
@@ -220,7 +220,7 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {
 
 
 struct LH_ENTRY(table) * LH_ENTRY(create)(int initialSize) {
-struct LH_ENTRY(table) * ret = stasis_malloc(1, struct LH_ENTRY(table));
+struct LH_ENTRY(table) * ret = stasis_alloc(struct LH_ENTRY(table));
 ret->bucketList = calloc(initialSize, sizeof(struct LH_ENTRY(pair_t)));
 HASH_ENTRY(_get_size_params)(initialSize,
 &(ret->bucketListBits),
@@ -466,7 +466,7 @@ void * pblHtFirst ( pblHashTable_t * h ) {
 if(pblLists == 0) {
 pblLists = LH_ENTRY(create)(10);
 }
-struct LH_ENTRY(list) *list = stasis_malloc(1, struct LH_ENTRY(list));
+struct LH_ENTRY(list) *list = stasis_alloc(struct LH_ENTRY(list));
 struct LH_ENTRY(list) * oldList;
 
 if((oldList = LH_ENTRY(insert)(pblLists,
@@ -56,7 +56,7 @@ void printList(LinkedList **l) {
 printf (".\n");
 }
 void addVal(LinkedList **list, long val) {
-LinkedList * node = stasis_malloc(1, LinkedList);
+LinkedList * node = stasis_alloc(LinkedList);
 node->val = val;
 node->next = NULL;
 if (*list==NULL) {
@@ -105,7 +105,7 @@ long popMaxVal(LinkedList **list) {
 void addSortedVal(LinkedList **list, long val) {
 LinkedList * tmp;
 LinkedList * tmpprev;
-LinkedList * node = stasis_malloc(1, LinkedList);
+LinkedList * node = stasis_alloc(LinkedList);
 node->val = val;
 /*see if new entry should come in the beginning*/
 if ((*list==NULL) || ((*list)->val<val)) {
@@ -24,7 +24,7 @@ static void free_key(void * key) {
 }
 
 stasis_aggregate_min_t * stasis_aggregate_min_init(int large) {
-stasis_aggregate_min_t * ret = stasis_malloc(1, stasis_aggregate_min_t);
+stasis_aggregate_min_t * ret = stasis_alloc(stasis_aggregate_min_t);
 if(large) {
 ret->tree = rbinit(cmp_lsn_t,0);
 } else {
@@ -58,7 +58,7 @@ void stasis_aggregate_min_add(stasis_aggregate_min_t * min, lsn_t * a) {
 }
 lsn_t * p = pthread_getspecific(min->key);
 if(!p) {
-p = stasis_malloc(1, lsn_t);
+p = stasis_alloc(lsn_t);
 *p = -1;
 pthread_setspecific(min->key, p);
 }
@@ -8,8 +8,8 @@ struct stasis_util_multiset_t {
 };
 
 stasis_util_multiset_t * stasis_util_multiset_create() {
-stasis_util_multiset_t * set = stasis_malloc(1, stasis_util_multiset_t);
-set->items = stasis_malloc(1, lsn_t);
+stasis_util_multiset_t * set = stasis_alloc(stasis_util_multiset_t);
+set->items = stasis_alloc(lsn_t);
 set->item_count = 0;
 return set;
 }
@@ -272,7 +272,7 @@ stasis_ringbuffer_t * stasis_ringbuffer_init(intptr_t base, lsn_t initial_offset
 return 0;
 }
 
-stasis_ringbuffer_t * ring = stasis_malloc(1, stasis_ringbuffer_t);
+stasis_ringbuffer_t * ring = stasis_alloc(stasis_ringbuffer_t);
 
 // Allocate the memory region using mmap black magic.
 
@@ -25,11 +25,11 @@ struct stasis_util_slab_t {
 * @return a slab allocator. Calling stasis_util_slab_destroy() will deallocate it all-at-once.
 */
 stasis_util_slab_t * stasis_util_slab_create(uint32_t obj_sz, uint32_t block_sz) {
-stasis_util_slab_t* ret = stasis_malloc(1, stasis_util_slab_t);
+stasis_util_slab_t* ret = stasis_alloc(stasis_util_slab_t);
 
 // printf("slab init: obj siz = %lld, block_sz = %lld\n", (long long)obj_sz, (long long)block_sz);
 
-ret->blocks = stasis_malloc(1, byte*);
+ret->blocks = stasis_alloc(byte*);
 ret->blocks[0] = stasis_malloc(block_sz, byte);
 ret->freelist_ptr = 0;
 
@@ -80,7 +80,7 @@ int stasis_util_skiplist_default_key_finalize(void * p, void * ignored) {
 static inline int stasis_util_skiplist_random_level(pthread_key_t k) {
 kiss_table_t * kiss = pthread_getspecific(k);
 if(kiss == 0) {
-kiss = stasis_malloc(1, kiss_table_t);
+kiss = stasis_alloc(kiss_table_t);
 stasis_util_random_kiss_settable(kiss,
 random(), random(), random(), random(), random(), random());
 pthread_setspecific(k, kiss);
@@ -134,7 +134,7 @@ static inline int stasis_util_skiplist_cmp_helper2(
 static inline stasis_skiplist_t * stasis_util_skiplist_init(
 int (*cmp)(const void*, const void*),
 int (*finalize)(void *, void * nul)) {
-stasis_skiplist_t * list = stasis_malloc(1, stasis_skiplist_t);
+stasis_skiplist_t * list = stasis_alloc(stasis_skiplist_t);
 list->levelCap = 32;
 list->h = hazard_init(STASIS_SKIPLIST_HP_COUNT+list->levelCap,
 STASIS_SKIPLIST_HP_COUNT, 250, stasis_util_skiplist_node_finalize, list);
@@ -127,7 +127,7 @@ static void hazard_deinit_thread(void * p) {
 */
 static inline hazard_t* hazard_init(int hp_slots, int stack_start, int r_slots,
 int (*finalizer)(void*, void*), void * conf) {
-hazard_t * ret = stasis_malloc(1, hazard_t);
+hazard_t * ret = stasis_alloc(hazard_t);
 pthread_key_create(&ret->hp, hazard_deinit_thread);
 ret->num_slots = hp_slots;
 ret->stack_start = stack_start;
@@ -142,7 +142,7 @@ static inline hazard_t* hazard_init(int hp_slots, int stack_start, int r_slots,
 static inline hazard_ptr_rec_t * hazard_ensure_tls(hazard_t * h) {
 hazard_ptr_rec_t * rec = pthread_getspecific(h->hp);
 if(rec == NULL) {
-rec = stasis_malloc(1, hazard_ptr_rec_t);
+rec = stasis_alloc(hazard_ptr_rec_t);
 rec->hp = calloc(h->num_slots, sizeof(hazard_ptr));
 rec->rlist = calloc(h->num_r_slots, sizeof(hazard_ptr));
 rec->rlist_len = 0;
@@ -66,7 +66,7 @@ static inline void stasis_histogram_insert_log_timeval(stasis_histogram_64_t* hi
 
 static inline void stasis_histogram_tick(stasis_histogram_64_t* hist) {
 struct timeval * val = pthread_getspecific(hist->tls);
-if(!val) { val = stasis_malloc(1, struct timeval); pthread_setspecific(hist->tls, val); }
+if(!val) { val = stasis_alloc(struct timeval); pthread_setspecific(hist->tls, val); }
 gettimeofday(val,0);
 }
 static inline void stasis_histogram_tock(stasis_histogram_64_t* hist) {
@@ -10,7 +10,9 @@
 
 #include <stasis/common.h>
 
+#define stasis_alloc(typ) ((typ*)malloc(sizeof(typ)))
 #define stasis_malloc(cnt, typ) ((typ*)malloc((cnt)*sizeof(typ)))
+#define stasis_malloc_trailing_array(typ, array_sz) ((typ*)malloc(sizeof(typ)+(array_sz)))
 #define stasis_calloc(cnt, typ) ((typ*)calloc((cnt),sizeof(typ)))
 #define stasis_realloc(ptr, cnt, typ) ((typ*)realloc(ptr, (cnt)*sizeof(typ)))
 #define stasis_free(ptr) free(ptr)
@@ -33,7 +33,7 @@ BEGIN_C_DECLS
 typedef pthread_rwlock_t rwl;
 
 static inline rwl* initlock(void) {
-rwl* ret = stasis_malloc(1, rwl);
+rwl* ret = stasis_alloc(rwl);
 int err = pthread_rwlock_init(ret, 0);
 if(err) { perror("couldn't init rwlock"); abort(); }
 DEBUG("initlock(%llx)\n", (long long)ret);
@@ -119,7 +119,7 @@ typedef struct rwlc {
 } rwlc;
 
 static inline rwlc* rwlc_initlock(void) {
-rwlc* ret = stasis_malloc(1, rwlc);
+rwlc* ret = stasis_alloc(rwlc);
 ret->rw = initlock();
 int err = pthread_mutex_init(&ret->mut, 0);
 ret->is_writelocked = 0;