replace malloc with stasis_malloc in many places
parent 6c7d8d3968
commit 8c2547974a
60 changed files with 195 additions and 193 deletions
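
Every hunk below applies the same mechanical rewrite: byte-count allocations such as T * x = malloc(sizeof(*x)) or malloc(sizeof(T) * n) become stasis_malloc(n, T), which takes an element count and an element type. The macro's definition is not shown in this diff; the following is a minimal sketch of what it plausibly expands to, not the commit's actual code:

/* Hypothetical reconstruction of the allocator macro this commit adopts;
 * the real definition lives in a Stasis header that the diff omits.
 * The cast keeps the expression legal under C++ as well as C. */
#define stasis_malloc(count, type) ((type*)malloc((count) * sizeof(type)))

Under this assumed definition, stasis_malloc(1, dpt_entry) allocates one dpt_entry, and stasis_malloc(len, byte) reduces to plain malloc(len), since sizeof(byte) is 1.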
@@ -92,7 +92,7 @@ void * random_op_thread(void * argp) {
 int do_operations(pageid_t page_count, int num_threads, unsigned long long num_ops, double write_frac, int target_ops) {
   unsigned long long ops_per_thread = ceil(((double)num_ops) / (double)num_threads);
   unsigned long long ops_remaining = num_ops;
-  pthread_t * threads = malloc(sizeof(threads[0]) * num_threads);
+  pthread_t * threads = stasis_malloc(num_threads, pthread_t);

   struct timeval tv;
   gettimeofday(&tv,0);

@@ -100,7 +100,7 @@ int do_operations(pageid_t page_count, int num_threads, unsigned long long num_o

   for(int i = 0; i < num_threads ; i++) {
     if(ops_remaining <= 0) { num_threads = i; break; }
-    struct thread_arg *a = malloc(sizeof(*a));
+    struct thread_arg *a = stasis_malloc(1, struct thread_arg);
     a->seed = base_seed + i;
     a->num_ops = ops_remaining < ops_per_thread ? ops_remaining : ops_per_thread;
     a->write_frac = write_frac;

@@ -134,7 +134,7 @@ static void void_double_add(void * val, struct rbtree * a, struct rbtree * b) {
 static int availablePages_remove(stasis_allocation_policy_t *ap, pageid_t pageid);
 static int availablePages_add(stasis_allocation_policy_t *ap, pageid_t pageid, size_t freespace) {
   int ret = availablePages_remove(ap, pageid);
-  availablePages_pageid_freespace* tup= malloc(sizeof(*tup));
+  availablePages_pageid_freespace* tup= stasis_malloc(1, availablePages_pageid_freespace);
   tup->pageid = pageid;
   tup->freespace = freespace;
   void_double_add(tup, ap->availablePages_key_pageid, ap->availablePages_key_freespace_pageid);

@@ -151,7 +151,7 @@ static int pageOwners_add(stasis_allocation_policy_t *ap, int xid, size_t freesp

   int ret = pageOwners_remove(ap, pageid);

-  pageOwners_xid_freespace_pageid * tup = malloc(sizeof(*tup));
+  pageOwners_xid_freespace_pageid * tup = stasis_malloc(1, pageOwners_xid_freespace_pageid);
   tup->xid = xid;
   tup->freespace = freespace;
   tup->pageid = pageid;

@@ -202,7 +202,7 @@ static int allPages_lookup_by_pageid(stasis_allocation_policy_t *ap, pageid_t pa
   }
 }
 static int allPages_add(stasis_allocation_policy_t *ap, pageid_t pageid, size_t freespace) {
-  allPages_pageid_freespace * tup = malloc(sizeof(*tup));
+  allPages_pageid_freespace * tup = stasis_malloc(1, allPages_pageid_freespace);
   tup->pageid = pageid;
   tup->freespace = freespace;
   int ret = void_single_add(tup, ap->allPages_key_pageid);

@@ -230,7 +230,7 @@ static void allPages_removeAll(stasis_allocation_policy_t *ap) {
 }

 static void allPages_set_freespace(stasis_allocation_policy_t *ap, pageid_t pageid, size_t freespace) {
-  allPages_pageid_freespace * tup = malloc(sizeof(*tup));
+  allPages_pageid_freespace * tup = stasis_malloc(1, allPages_pageid_freespace);
   tup->pageid = pageid;
   tup->freespace = freespace;
   int existed = void_single_add(tup, ap->allPages_key_pageid);

@@ -323,7 +323,7 @@ static int xidAllocedDealloced_helper_remove(stasis_allocation_policy_t *ap, str
 static int xidAllocedDealloced_helper_add(stasis_allocation_policy_t *ap, struct rbtree *first, struct rbtree* second, int xid, pageid_t pageid) {
   int existed = xidAllocedDealloced_helper_remove(ap, first, second, xid, pageid);

-  xidAllocedDealloced_xid_pageid * tup = malloc(sizeof(*tup));
+  xidAllocedDealloced_xid_pageid * tup = stasis_malloc(1, xidAllocedDealloced_xid_pageid);
   tup->xid = xid;
   tup->pageid = pageid;
   void_double_add(tup, first, second);

@@ -454,7 +454,7 @@ static int xidAllocedDealloced_cmp_xid_pageid(const void *ap, const void *bp, co
 }

 stasis_allocation_policy_t * stasis_allocation_policy_init() {
-  stasis_allocation_policy_t * ap = malloc(sizeof(*ap));
+  stasis_allocation_policy_t * ap = stasis_malloc(1, stasis_allocation_policy_t);
   ap->availablePages_key_pageid = rbinit(availablePages_cmp_pageid, 0);
   ap->availablePages_key_freespace_pageid = rbinit(availablePages_cmp_freespace_pageid, 0);
   ap->pageOwners_key_pageid = rbinit(pageOwners_cmp_pageid, 0);

@@ -100,7 +100,7 @@ compensated_function Page * __profile_loadPage(int xid, pageid_t pageid, char *
   int * pins = LH_ENTRY(find)(profile_load_pins_hash, &ret, sizeof(void*));

   if(!pins) {
-    pins = malloc(sizeof(int));
+    pins = stasis_malloc(1, int);
     *pins = 0;
     LH_ENTRY(insert)(profile_load_pins_hash, &ret, sizeof(void*), pins);
   }

@@ -506,8 +506,8 @@ static int bhCloseHandleImpl(stasis_buffer_manager_t *bm, stasis_buffer_manager_
 }

 stasis_buffer_manager_t* stasis_buffer_manager_hash_open(stasis_page_handle_t * h, stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
-  stasis_buffer_manager_t *bm = malloc(sizeof(*bm));
-  stasis_buffer_hash_t *bh = malloc(sizeof(*bh));
+  stasis_buffer_manager_t *bm = stasis_malloc(1, stasis_buffer_manager_t);
+  stasis_buffer_hash_t *bh = stasis_malloc(1, stasis_buffer_hash_t);

   bm->openHandleImpl = bhOpenHandleImpl;
   bm->closeHandleImpl = bhCloseHandleImpl;

@@ -568,7 +568,7 @@ stasis_buffer_manager_t* stasis_buffer_manager_hash_open(stasis_page_handle_t *
   bh->prefetch_next_count = 0;
   bh->prefetch_next_pageid = 0;

-  bh->prefetch_workers = malloc(sizeof(pthread_t) * bh->prefetch_thread_count);
+  bh->prefetch_workers = stasis_malloc(bh->prefetch_thread_count, pthread_t);
   for(int i = 0; i < bh->prefetch_thread_count; i++) {
     pthread_create(&bh->prefetch_workers[i], 0, prefetch_worker, bh);
   }

@@ -177,7 +177,7 @@ static inline stasis_buffer_concurrent_hash_tls_t * populateTLS(stasis_buffer_ma
   stasis_buffer_concurrent_hash_t *ch = bm->impl;
   stasis_buffer_concurrent_hash_tls_t *tls = pthread_getspecific(ch->key);
   if(tls == NULL) {
-    tls = malloc(sizeof(*tls));
+    tls = stasis_malloc(1, stasis_buffer_concurrent_hash_tls_t);
     tls->p = NULL;
     tls->bm = bm;
     pthread_setspecific(ch->key, tls);

@@ -409,8 +409,8 @@ static int chCloseHandle(stasis_buffer_manager_t *bm, stasis_buffer_manager_hand
 }

 stasis_buffer_manager_t* stasis_buffer_manager_concurrent_hash_open(stasis_page_handle_t * h, stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
-  stasis_buffer_manager_t *bm = malloc(sizeof(*bm));
-  stasis_buffer_concurrent_hash_t *ch = malloc(sizeof(*ch));
+  stasis_buffer_manager_t *bm = stasis_malloc(1, stasis_buffer_manager_t);
+  stasis_buffer_concurrent_hash_t *ch = stasis_malloc(1, stasis_buffer_concurrent_hash_t);
   bm->openHandleImpl = chOpenHandle;
   bm->closeHandleImpl = chCloseHandle;
   bm->loadPageImpl = chLoadPageImpl;

@@ -441,7 +441,7 @@ stasis_buffer_manager_t* stasis_buffer_manager_concurrent_hash_open(stasis_page_

   if(stasis_replacement_policy == STASIS_REPLACEMENT_POLICY_CONCURRENT_LRU) {

-    replacementPolicy ** lrus = malloc(sizeof(lrus[0]) * 37);
+    replacementPolicy ** lrus = stasis_malloc(37, replacementPolicy*);
     for(int i = 0; i < 37; i++) {
       lrus[i] = lruFastInit();
     }

@@ -54,7 +54,7 @@ static int bufManCloseHandle(stasis_buffer_manager_t *bm, stasis_buffer_manager_

 stasis_buffer_manager_t* stasis_buffer_manager_deprecated_open(stasis_page_handle_t * ph) {
   page_handle = ph;
-  stasis_buffer_manager_t * bm = malloc(sizeof(*bm));
+  stasis_buffer_manager_t * bm = stasis_malloc(1, stasis_buffer_manager_t);
   bm->releasePageImpl = bufManReleasePage;
   bm->openHandleImpl = bufManOpenHandle;
   bm->closeHandleImpl = bufManCloseHandle;

@@ -130,7 +130,7 @@ static void pfPageWrite(stasis_page_handle_t * h, Page * ret) {

 /** @todo O_DIRECT is broken in older linuxes (eg 2.4). The build script should disable it on such platforms. */
 stasis_page_handle_t* openPageFile(stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
-  stasis_page_handle_t * ret = malloc(sizeof(*ret));
+  stasis_page_handle_t * ret = stasis_malloc(1, stasis_page_handle_t);
   ret->read = pfPageRead;
   ret->write = pfPageWrite;
   ret->force_file = pfForcePageFile;

@@ -23,7 +23,7 @@ static Page * paLoadPage(stasis_buffer_manager_t *bm, stasis_buffer_manager_hand
   }

   if(!pa->pageMap[pageid]) {
-    pa->pageMap[pageid] = malloc(sizeof(Page));
+    pa->pageMap[pageid] = stasis_malloc(1, Page);
     pa->pageMap[pageid]->id = pageid;
     pa->pageMap[pageid]->pageType = type == UNKNOWN_TYPE_PAGE ? 0 : type;
     pa->pageMap[pageid]->LSN = 0;

@@ -82,8 +82,8 @@ static int paCloseHandle(stasis_buffer_manager_t *bm, stasis_buffer_manager_hand

 stasis_buffer_manager_t * stasis_buffer_manager_mem_array_open () {

-  stasis_buffer_manager_t * bm = malloc(sizeof(*bm));
-  stasis_buffer_manager_page_array_t * pa = malloc(sizeof(*pa));
+  stasis_buffer_manager_t * bm = stasis_malloc(1, stasis_buffer_manager_t);
+  stasis_buffer_manager_page_array_t * pa = stasis_malloc(1, stasis_buffer_manager_page_array_t);

   bm->releasePageImpl = paReleasePage;
   bm->openHandleImpl = paOpenHandle;

@@ -64,7 +64,7 @@ struct stasis_buffer_pool_t {

 stasis_buffer_pool_t* stasis_buffer_pool_init() {

-  stasis_buffer_pool_t * ret = malloc(sizeof(*ret));
+  stasis_buffer_pool_t * ret = stasis_malloc(1, stasis_buffer_pool_t);

   ret->nextPage = 0;

@@ -72,7 +72,7 @@ stasis_buffer_pool_t* stasis_buffer_pool_init() {

 #ifndef VALGRIND_MODE

-  byte * bufferSpace = malloc((stasis_buffer_manager_size + 2) * PAGE_SIZE);
+  byte * bufferSpace = stasis_malloc((stasis_buffer_manager_size + 2) * PAGE_SIZE, byte);
   assert(bufferSpace);
   ret->addr_to_free = bufferSpace;

@@ -85,7 +85,7 @@ stasis_buffer_pool_t* stasis_buffer_pool_init() {

   // We need one dummy page for locking purposes,
   // so this array has one extra page in it.
-  ret->pool = malloc(sizeof(ret->pool[0])*(stasis_buffer_manager_size+1));
+  ret->pool = stasis_malloc(stasis_buffer_manager_size+1, Page);

   for(pageid_t i = 0; i < stasis_buffer_manager_size+1; i++) {
     ret->pool[i].rwlatch = initlock();

@@ -93,7 +93,7 @@ stasis_buffer_pool_t* stasis_buffer_pool_init() {
 #ifndef VALGRIND_MODE
     ret->pool[i].memAddr = &(bufferSpace[i*PAGE_SIZE]);
 #else
-    ret->pool[i].memAddr = malloc(PAGE_SIZE);
+    ret->pool[i].memAddr = stasis_malloc(PAGE_SIZE, byte);
 #endif
     ret->pool[i].dirty = 0;
     ret->pool[i].needsFlush = 0;
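
The buffer-pool hunks above show both call forms: typed arrays pass an element count (stasis_malloc(stasis_buffer_manager_size+1, Page)), while raw storage passes a byte count with byte as the element type. Assuming the macro sketched earlier, both forms request the same number of bytes as the malloc calls they replace; the variable names here are placeholders, not code from the commit:

/* Typed form: count is an element count. */
Page * pool = stasis_malloc(page_count + 1, Page);   /* (page_count+1) * sizeof(Page) bytes */

/* Byte form: count is already a byte count, since sizeof(byte) == 1. */
byte * space = stasis_malloc(byte_count, byte);      /* byte_count bytes */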
@@ -58,7 +58,7 @@ int __lladd_pthread_mutex_lock(lladd_pthread_mutex_t *mutex, char * file, int li
   mutex->last_acquired_at = location;

   if(!tup) {
-    tup = malloc(sizeof(profile_tuple));
+    tup = stasis_malloc(1, profile_tuple);

     init_tuple(tup);

@@ -133,7 +133,7 @@ int __lladd_pthread_cond_wait(pthread_cond_t *cond, lladd_pthread_mutex_t *mutex
   mutex->last_acquired_at = location;

   if(!tup) {
-    tup = malloc(sizeof(profile_tuple));
+    tup = stasis_malloc(1, profile_tuple);

     init_tuple(tup);

@@ -167,7 +167,7 @@ int __lladd_pthread_cond_timedwait(pthread_cond_t *cond, lladd_pthread_mutex_t *
 #undef downgradelock

 __profile_rwl *__profile_rw_initlock (char * file, int line) {
-  __profile_rwl * ret = malloc(sizeof(__profile_rwl));
+  __profile_rwl * ret = stasis_malloc(1, __profile_rwl);

   ret->file = file;
   ret->line = line;

@@ -219,7 +219,7 @@ void __profile_readlock (__profile_rwl *lock, int d, char * file, int line) {
   lock->last_acquired_at = location;

   if(!tup) {
-    tup = malloc(sizeof(profile_tuple));
+    tup = stasis_malloc(1, profile_tuple);

     init_tuple(tup);

@@ -258,7 +258,7 @@ void __profile_writelock (__profile_rwl *lock, int d, char * file, int line) {
   lock->last_acquired_at = location;

   if(!tup) {
-    tup = malloc(sizeof(profile_tuple));
+    tup = stasis_malloc(1, profile_tuple);

     init_tuple(tup);

@@ -55,13 +55,13 @@ void stasis_dirty_page_table_set_dirty(stasis_dirty_page_table_t * dirtyPages, P
   pthread_mutex_lock(&dirtyPages->mutex);
   if(!p->dirty) {
     p->dirty = 1;
-    dpt_entry * e = malloc(sizeof(*e));
+    dpt_entry * e = stasis_malloc(1, dpt_entry);
     e->p = p->id;
     e->lsn = p->LSN;
     const void * ret = rbsearch(e, dirtyPages->tableByPage);
     assert(ret == e); // otherwise, the entry was already in the table.

-    e = malloc(sizeof(*e));
+    e = stasis_malloc(1, dpt_entry);
     e->p = p->id;
     e->lsn = p->LSN;
     ret = rbsearch(e, dirtyPages->tableByLsnAndPage);

@@ -344,7 +344,7 @@ void stasis_dirty_page_table_set_buffer_manager(stasis_dirty_page_table_t * dpt,
 }

 stasis_dirty_page_table_t * stasis_dirty_page_table_init() {
-  stasis_dirty_page_table_t * ret = malloc(sizeof(*ret));
+  stasis_dirty_page_table_t * ret = stasis_malloc(1, stasis_dirty_page_table_t);
   ret->outstanding_flush_lsns = stasis_util_multiset_create();

   ret->tableByPage = rbinit(dpt_cmp_page, 0);

@@ -61,7 +61,7 @@ static stasis_write_buffer_t * debug_write_buffer(stasis_handle_t * h,
   printf("tid=%9ld call write_buffer(%lx, %lld, %lld)\n",
          (long)(intptr_t)pthread_self(), (unsigned long)hh, off, len); fflush(stdout);
   stasis_write_buffer_t * ret = hh->write_buffer(hh,off,len);
-  stasis_write_buffer_t * retWrap = malloc(sizeof(stasis_write_buffer_t));
+  stasis_write_buffer_t * retWrap = stasis_malloc(1, stasis_write_buffer_t);
   *retWrap = *ret;
   retWrap->h = h;
   retWrap->impl = ret;

@@ -85,7 +85,7 @@ static stasis_read_buffer_t * debug_read_buffer(stasis_handle_t * h,
   printf("tid=%9ld call read_buffer(%lx, %lld, %lld)\n",
          (long)(intptr_t)pthread_self(), (unsigned long)hh, off, len); fflush(stdout);
   stasis_read_buffer_t * ret = hh->read_buffer(hh,off,len);
-  stasis_read_buffer_t * retWrap = malloc(sizeof(stasis_read_buffer_t));
+  stasis_read_buffer_t * retWrap = stasis_malloc(1, stasis_read_buffer_t);
   *retWrap = *ret;
   retWrap->h = h;
   retWrap->impl = ret;

@@ -156,9 +156,9 @@ struct stasis_handle_t debug_func = {


 stasis_handle_t * stasis_handle(open_debug)(stasis_handle_t * h) {
-  stasis_handle_t * ret = malloc(sizeof(stasis_handle_t));
+  stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
   *ret = debug_func;
-  ret->impl = malloc(sizeof(debug_impl));
+  ret->impl = stasis_malloc(1, debug_impl);
   ((debug_impl*)(ret->impl))->h = h;
   return ret;
 }

@@ -245,7 +245,7 @@ static int file_write(stasis_handle_t *h, lsn_t off, const byte * dat, lsn_t len
 static stasis_write_buffer_t * file_write_buffer(stasis_handle_t * h,
                                                  lsn_t off, lsn_t len) {
   // Allocate the handle
-  stasis_write_buffer_t * ret = malloc(sizeof(stasis_write_buffer_t));
+  stasis_write_buffer_t * ret = stasis_malloc(1, stasis_write_buffer_t);
   if(!ret) { return NULL; }

   file_impl * impl = (file_impl*)h->impl;

@@ -264,7 +264,7 @@ static stasis_write_buffer_t * file_write_buffer(stasis_handle_t * h,
   byte * buf;
   if(!error) {
     // Allocate the buffer
-    buf = malloc(len);
+    buf = stasis_malloc(len, byte);
     if(!buf) {
       error = ENOMEM;
     }

@@ -312,10 +312,10 @@ static int file_release_write_buffer(stasis_write_buffer_t * w) {

 static stasis_read_buffer_t * file_read_buffer(stasis_handle_t * h,
                                                lsn_t off, lsn_t len) {
-  stasis_read_buffer_t * ret = malloc(sizeof(stasis_read_buffer_t));
+  stasis_read_buffer_t * ret = stasis_malloc(1, stasis_read_buffer_t);
   if(!ret) { return NULL; }

-  byte * buf = malloc(len);
+  byte * buf = stasis_malloc(len, byte);
   int error = 0;

   if(!buf) { error = ENOMEM; }

@@ -484,11 +484,11 @@ struct stasis_handle_t file_func = {
 };

 stasis_handle_t * stasis_handle(open_file)(const char * filename, int flags, int mode) {
-  stasis_handle_t * ret = malloc(sizeof(stasis_handle_t));
+  stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
   if(!ret) { return NULL; }
   *ret = file_func;

-  file_impl * impl = malloc(sizeof(file_impl));
+  file_impl * impl = stasis_malloc(1, file_impl);
   ret->impl = impl;
   pthread_mutex_init(&(impl->mut), 0);
   assert(sizeof(off_t) >= (64/8));

@@ -45,7 +45,7 @@ static stasis_write_buffer_t * mem_write_buffer(stasis_handle_t * h,
                                                 lsn_t off, lsn_t len) {
   mem_impl* impl = (mem_impl*)(h->impl);

-  stasis_write_buffer_t * ret = malloc(sizeof(stasis_write_buffer_t));
+  stasis_write_buffer_t * ret = stasis_malloc(1, stasis_write_buffer_t);
   if(!ret) { return NULL; }

   pthread_mutex_lock(&(impl->mut));

@@ -62,7 +62,7 @@ static stasis_write_buffer_t * mem_write_buffer(stasis_handle_t * h,
       newbuf = realloc(impl->buf, off+len);
     } else {
       free(impl->buf);
-      newbuf = malloc(0);
+      newbuf = stasis_malloc(0, byte);
     }
     if(newbuf) {
       impl->buf = newbuf;

@@ -102,7 +102,7 @@ static stasis_read_buffer_t * mem_read_buffer(stasis_handle_t * h,
   mem_impl * impl = (mem_impl*)(h->impl);
   pthread_mutex_lock(&(impl->mut));

-  stasis_read_buffer_t * ret = malloc(sizeof(stasis_read_buffer_t));
+  stasis_read_buffer_t * ret = stasis_malloc(1, stasis_read_buffer_t);
   if(!ret) { return NULL; }

   if(off < 0 || off + len > impl->end_pos) {

@@ -183,11 +183,11 @@ struct stasis_handle_t mem_func = {
 };

 stasis_handle_t * stasis_handle(open_memory)() {
-  stasis_handle_t * ret = malloc(sizeof(stasis_handle_t));
+  stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
   if(!ret) { return NULL; }
   *ret = mem_func;

-  mem_impl * impl = malloc(sizeof(mem_impl));
+  mem_impl * impl = stasis_malloc(1, mem_impl);
   ret->impl = impl;
   pthread_mutex_init(&(impl->mut), 0);
   impl->end_pos = 0;

@@ -66,7 +66,7 @@
 static inline stasis_read_buffer_t* alloc_read_buffer_error(stasis_handle_t *h,
                                                             int error) {
   assert(error);
-  stasis_read_buffer_t * r = malloc(sizeof(stasis_read_buffer_t));
+  stasis_read_buffer_t * r = stasis_malloc(1, stasis_read_buffer_t);
   r->h = h;
   r->buf = 0;
   r->len = 0;

@@ -78,7 +78,7 @@ static inline stasis_read_buffer_t* alloc_read_buffer_error(stasis_handle_t *h,
 static inline stasis_write_buffer_t* alloc_write_buffer_error
     (stasis_handle_t *h, int error) {
   assert(error);
-  stasis_write_buffer_t * w = malloc(sizeof(stasis_write_buffer_t));
+  stasis_write_buffer_t * w = stasis_malloc(1, stasis_write_buffer_t);
   w->h = h;
   w->off = 0;
   w->buf = 0;

@@ -214,7 +214,7 @@ static void releaseSlowHandle(nbw_impl * impl, stasis_handle_t * slow) {
 }

 static tree_node * allocTreeNode(lsn_t off, lsn_t len) {
-  tree_node * ret = malloc(sizeof(tree_node));
+  tree_node * ret = stasis_malloc(1, tree_node);
   ret->start_pos = off;
   ret->end_pos = off + len;
   ret->dirty = CLEAN;

@@ -408,11 +408,11 @@ static stasis_write_buffer_t * nbw_write_buffer(stasis_handle_t * h,
   const tree_node * n = allocFastHandle(impl, off, len);
   stasis_write_buffer_t * w = n->h->write_buffer(n->h, off, len);

-  write_buffer_impl * w_impl = malloc(sizeof(write_buffer_impl));
+  write_buffer_impl * w_impl = stasis_malloc(1, write_buffer_impl);
   w_impl->n = n;
   w_impl->w = w;

-  stasis_write_buffer_t * ret = malloc(sizeof(stasis_write_buffer_t));
+  stasis_write_buffer_t * ret = stasis_malloc(1, stasis_write_buffer_t);
   ret->h = h;
   ret->off = w->off;
   ret->len = w->len;

@@ -454,11 +454,11 @@ static stasis_read_buffer_t * nbw_read_buffer(stasis_handle_t * h,
   stasis_handle_t * r_h = n ? n->h : getSlowHandle(impl);
   r = r_h->read_buffer(r_h, off, len);

-  read_buffer_impl * r_impl = malloc(sizeof(read_buffer_impl));
+  read_buffer_impl * r_impl = stasis_malloc(1, read_buffer_impl);
   r_impl->n = n;
   r_impl->r = r;

-  stasis_read_buffer_t * ret = malloc(sizeof(stasis_read_buffer_t));
+  stasis_read_buffer_t * ret = stasis_malloc(1, stasis_read_buffer_t);
   ret->h = h;
   ret->off = r->off;
   ret->len = r->len;

@@ -661,11 +661,11 @@ static void * nbw_worker(void * handle) {
       len += np_len;

       if(first) {
-        buf = malloc(r->len + len);
+        buf = stasis_malloc(r->len + len, byte);
         memcpy(buf, r->buf, r->len);
         buf_off += r->len;

-        dummies = malloc(sizeof(tree_node));
+        dummies = stasis_malloc(1, tree_node);
         dummies[0] = dummy;
         dummy_count = 1;
         first = 0;

@@ -774,7 +774,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)
     stasis_handle_t * (*fast_factory)(lsn_t, lsn_t, void *),
     void * fast_factory_arg, int worker_thread_count, lsn_t buffer_size,
     int max_fast_handles) {
-  nbw_impl * impl = malloc(sizeof(nbw_impl));
+  nbw_impl * impl = stasis_malloc(1, nbw_impl);
   pthread_mutex_init(&impl->mut, 0);

   impl->end_pos = 0;

@@ -787,7 +787,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)

   impl->available_slow_handles = 0;
   impl->available_slow_handle_count = 0;
-  impl->all_slow_handles = malloc(sizeof(stasis_handle_t*));
+  impl->all_slow_handles = stasis_malloc(1, stasis_handle_t*);
   impl->all_slow_handle_count = 0;

   impl->requested_bytes_written = 0;

@@ -803,7 +803,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)
   impl->max_buffer_size = buffer_size;
   impl->used_buffer_size = 0;

-  impl->workers = malloc(worker_thread_count * sizeof(pthread_t));
+  impl->workers = stasis_malloc(worker_thread_count, pthread_t);
   impl->worker_count = worker_thread_count;

   pthread_cond_init(&impl->pending_writes_cond, 0);

@@ -812,7 +812,7 @@ stasis_handle_t * stasis_handle(open_non_blocking)
   impl->still_open = 1;
   impl->refcount = 1;

-  stasis_handle_t *h = malloc(sizeof(stasis_handle_t));
+  stasis_handle_t *h = stasis_malloc(1, stasis_handle_t);
   *h = nbw_func;
   h->impl = impl;

@@ -855,7 +855,7 @@ static stasis_handle_t * slow_pfile_factory(void * argsP) {
   return h;
 }
 static int nop_close(stasis_handle_t*h) { return 0; }
-  struct sf_args * slow_arg = malloc(sizeof(sf_args));
+  struct sf_args * slow_arg = stasis_malloc(1, struct sf_args);
   slow_arg->filename = path;

   slow_arg->openMode = openMode;

@@ -64,7 +64,7 @@ typedef struct pfile_impl {
  */
 static int pfile_num_copies(stasis_handle_t *h) { return 0; }
 /**
-   We have to call malloc(), but not memcpy(). Maybe this should return 1.
+   We have to call malloc, but not memcpy. Maybe this should return 1.
  */
 static int pfile_num_copies_buffer(stasis_handle_t *h) { return 0; }

@@ -198,7 +198,7 @@ static int pfile_write(stasis_handle_t *h, lsn_t off, const byte *dat,

 static stasis_write_buffer_t * pfile_write_buffer(stasis_handle_t *h,
                                                   lsn_t off, lsn_t len) {
-  stasis_write_buffer_t *ret = malloc(sizeof(stasis_write_buffer_t));
+  stasis_write_buffer_t *ret = stasis_malloc(1, stasis_write_buffer_t);

   if (!ret) {
     h->error = ENOMEM;

@@ -213,7 +213,7 @@ static stasis_write_buffer_t * pfile_write_buffer(stasis_handle_t *h,

   byte *buf;
   if (!error) {
-    buf = malloc(len);
+    buf = stasis_malloc(len, byte);
     if (!buf) { error = ENOMEM; }
   }
   if (error) {

@@ -249,10 +249,10 @@ static int pfile_release_write_buffer(stasis_write_buffer_t *w) {

 static stasis_read_buffer_t *pfile_read_buffer(stasis_handle_t *h,
                                                lsn_t off, lsn_t len) {
-  stasis_read_buffer_t *ret = malloc(sizeof(stasis_read_buffer_t));
+  stasis_read_buffer_t *ret = stasis_malloc(1, stasis_read_buffer_t);
   if (!ret) { return NULL; }

-  byte *buf = malloc(len);
+  byte *buf = stasis_malloc(len,byte);
   int error = 0;

   if (!buf) { error = ENOMEM; }

@@ -417,11 +417,11 @@ static struct stasis_handle_t pfile_func = {

 stasis_handle_t *stasis_handle(open_pfile)(const char *filename,
                                            int flags, int mode) {
-  stasis_handle_t *ret = malloc(sizeof(stasis_handle_t));
+  stasis_handle_t *ret = stasis_malloc(1, stasis_handle_t);
   if (!ret) { return NULL; }
   *ret = pfile_func;

-  pfile_impl *impl = malloc(sizeof(pfile_impl));
+  pfile_impl *impl = stasis_malloc(1, pfile_impl);
   if (!impl) { free(ret); return NULL; }

   ret->impl = impl;

@@ -57,7 +57,7 @@ static int raid0_close(stasis_handle_t *h) {
 }
 static stasis_handle_t* raid0_dup(stasis_handle_t *h) {
   raid0_impl * r = h->impl;
-  stasis_handle_t ** h_dup = malloc(sizeof(h_dup[0]) * r->handle_count);
+  stasis_handle_t ** h_dup = stasis_malloc(r->handle_count, stasis_handle_t*);
   for(int i = 0; i < r->handle_count; i++) {
     h_dup[i] = r->h[i]->dup(r->h[i]);
   }

@@ -191,12 +191,12 @@ struct stasis_handle_t raid0_func = {
 };

 stasis_handle_t * stasis_handle_open_raid0(int handle_count, stasis_handle_t** h, uint32_t stripe_size) {
-  stasis_handle_t * ret = malloc(sizeof(*ret));
+  stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
   *ret = raid0_func;
-  raid0_impl * r = malloc(sizeof(*r));
+  raid0_impl * r = stasis_malloc(1, raid0_impl);
   r->stripe_size = stripe_size;
   r->handle_count = handle_count;
-  r->h = malloc(sizeof(r->h[0]) * handle_count);
+  r->h = stasis_malloc(handle_count, stasis_handle_t*);
   for(int i = 0; i < handle_count; i++) {
     r->h[i] = h[i];
   }

@@ -149,9 +149,9 @@ struct stasis_handle_t raid1_func = {
 };

 stasis_handle_t * stasis_handle_open_raid1(stasis_handle_t* a, stasis_handle_t* b) {
-  stasis_handle_t * ret = malloc(sizeof(*ret));
+  stasis_handle_t * ret = stasis_malloc(1, stasis_handle_t);
   *ret = raid1_func;
-  raid1_impl * i = malloc(sizeof(*i));
+  raid1_impl * i = stasis_malloc(1, raid1_impl);
   i->a = a; i->b = b;
   ret->impl = i;
   return ret;

@@ -24,7 +24,7 @@ static int cmp_transition(const void * a, const void * b, const void * arg) {
 }

 rangeTracker * rangeTrackerInit(int quantization) {
-  rangeTracker * ret = malloc(sizeof(rangeTracker));
+  rangeTracker * ret = stasis_malloc(1, rangeTracker);
   ret->ranges = RB_ENTRY(init)(cmp_transition, 0);
   ret->quantization = quantization;
   return ret;

@@ -64,7 +64,7 @@ static void rangeTrackerDelta(rangeTracker * rt, const range * r, int delta) {
     assert(t->pins + t->delta >= 0);
     if(t->pos != r->start) {
       int newpins = t->pins + t->delta;
-      t = malloc(sizeof(transition));
+      t = stasis_malloc(1, transition);
       t->pos = r->start;
       t->delta = delta;
       t->pins = newpins;

@@ -84,7 +84,7 @@ static void rangeTrackerDelta(rangeTracker * rt, const range * r, int delta) {
       }
     }
   } else {
-    t = malloc(sizeof(transition));
+    t = stasis_malloc(1, transition);
     t->pos = r->start;
     t->delta = delta;
     t->pins = 0;

@@ -111,7 +111,7 @@ static void rangeTrackerDelta(rangeTracker * rt, const range * r, int delta) {
   }
   if(!t || t->pos != r->stop) {
     // Need to allocate new transition
-    t = malloc(sizeof(transition));
+    t = stasis_malloc(1, transition);
     t->pos = r->stop;
     t->delta = 0-delta;
     t->pins = curpin;

@@ -167,7 +167,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
       assert(!t->pins);
      assert(t->delta);
       assert(! ret[next_range] );
-      ret[next_range] = malloc(sizeof(range));
+      ret[next_range] = stasis_malloc(1, range);
       ret[next_range]->start = t->pos;
       in_range = 1;
     }

@@ -175,7 +175,7 @@ static range ** rangeTrackerToArray(rangeTracker * rt) {
     if(t->pins + t->delta) {
       if(!in_range) {
         assert(! ret[next_range]);
-        ret[next_range] = malloc(sizeof(range));
+        ret[next_range] = stasis_malloc(1, range);
         ret[next_range]->start = t->pos;
         in_range = 1;
       }

@@ -350,7 +350,7 @@ const transition ** rangeTrackerEnumerate(rangeTracker * rt) {
   }
   RB_ENTRY(closelist)(list);

-  const transition ** ret = malloc(sizeof(transition **) * (transitionCount + 1));
+  const transition ** ret = stasis_malloc(transitionCount + 1, const transition*);

   list = RB_ENTRY(openlist) (rt->ranges);
   int i = 0;

@@ -352,7 +352,7 @@ LogEntry * stasis_log_file_pool_reserve_entry(stasis_log_t * log, size_t szs) {
   uint32_t sz = szs;
   stasis_log_file_pool_state * fp = log->impl;
   lsn_t * handle = pthread_getspecific(fp->handle_key);
-  if(!handle) { handle = malloc(sizeof(*handle)); pthread_setspecific(fp->handle_key, handle); }
+  if(!handle) { handle = stasis_malloc(1, lsn_t); pthread_setspecific(fp->handle_key, handle); }

   uint64_t framed_size = sz+sizeof(uint32_t)+sizeof(uint32_t);
   lsn_t off = stasis_ringbuffer_reserve_space(fp->ring, framed_size, handle);

@@ -663,8 +663,8 @@ void * stasis_log_file_pool_writeback_worker(void * arg) {
     int chunk = get_chunk_from_offset(log, off);
     int endchunk = get_chunk_from_offset(log, off + len);
     // build vector of write operations.
-    int* fds = malloc(sizeof(int) * (1 + endchunk-chunk));
-    lsn_t* file_offs = malloc(sizeof(lsn_t) * (1 + endchunk-chunk));
+    int* fds = stasis_malloc(1 + endchunk - chunk, int);
+    lsn_t* file_offs = stasis_malloc(1 + endchunk-chunk, lsn_t);
     for(int c = chunk; c <= endchunk; c++) {
       fds[c-chunk] = fp->ro_fd[c];
       file_offs[c-chunk] = fp->live_offsets[c];

@@ -731,8 +731,8 @@ int filesort(const void * ap, const void * bp) {
  */
 stasis_log_t* stasis_log_file_pool_open(const char* dirname, int filemode, int fileperm) {
   struct dirent **namelist;
-  stasis_log_file_pool_state* fp = malloc(sizeof(*fp));
-  stasis_log_t * ret = malloc(sizeof(*ret));
+  stasis_log_file_pool_state* fp = stasis_malloc(1, stasis_log_file_pool_state);
+  stasis_log_t * ret = stasis_malloc(1, stasis_log_t);

   static const stasis_log_t proto = {
     stasis_log_file_pool_set_truncation,

@@ -27,7 +27,7 @@ stasis_log_group_force_t * stasis_log_group_force_init(stasis_log_t * log, uint6
            "times > 1 second. (%llu second wait time requested)\n",
            (long long unsigned int) (wait_nsec / (1000 * 1000 * 1000)));
   }
-  stasis_log_group_force_t * ret = malloc(sizeof(*ret));
+  stasis_log_group_force_t * ret = stasis_malloc(1, stasis_log_group_force_t);
   ret->log = log;
   pthread_mutex_init(&ret->check_commit,0);
   pthread_cond_init(&ret->tooFewXacts,0);

@@ -189,7 +189,7 @@ static void stasis_log_impl_in_memory_set_truncation(stasis_log_t *log, stasis_t
 }

 stasis_log_t* stasis_log_impl_in_memory_open() {
-  stasis_log_impl_in_memory * impl = malloc(sizeof(*impl));
+  stasis_log_impl_in_memory * impl = stasis_malloc(1, stasis_log_impl_in_memory);
   impl->flushedLSN_lock = initlock();
   impl->globalOffset_lock = initlock();
   impl->globalOffset = 0;

@@ -201,7 +201,7 @@ stasis_log_t* stasis_log_impl_in_memory_open() {
     impl->bufferLen = stasis_log_in_memory_max_entries;
     impl->maxLen = impl->bufferLen;
   }
-  impl->buffer = malloc(impl->bufferLen * sizeof (LogEntry *));
+  impl->buffer = stasis_malloc(impl->bufferLen, LogEntry *);
   impl->trunc = 0;
   static stasis_log_t proto = {
     stasis_log_impl_in_memory_set_truncation,

@@ -221,7 +221,7 @@ stasis_log_t* stasis_log_impl_in_memory_open() {
     stasis_log_impl_in_memory_close,
     stasis_log_impl_in_memory_is_durable
   };
-  stasis_log_t* log = malloc(sizeof(*log));
+  stasis_log_t* log = stasis_malloc(1, stasis_log_t);
   memcpy(log,&proto, sizeof(proto));
   log->impl = impl;
   return log;

@@ -68,7 +68,7 @@ LogHandle* getLogHandle(stasis_log_t* log) {
 }

 LogHandle* getLSNHandle(stasis_log_t * log, lsn_t lsn) {
-  LogHandle* ret = malloc(sizeof(*ret));
+  LogHandle* ret = stasis_malloc(1, LogHandle);
   ret->next_offset = lsn;
   ret->prev_offset = lsn;
   ret->last = 0;

@@ -86,7 +86,7 @@ stasis_log_reordering_handle_open(stasis_transaction_table_entry_t * l,
                                   size_t chunk_len,
                                   size_t max_len,
                                   size_t max_size) {
-  stasis_log_reordering_handle_t * ret = malloc(sizeof(*ret));
+  stasis_log_reordering_handle_t * ret = stasis_malloc(1, stasis_log_reordering_handle_t);

   ret->l = l;
   ret->log = log;

@@ -840,7 +840,7 @@ stasis_log_t* stasis_log_safe_writes_open(const char * filename,
     isDurable_LogWriter, // is_durable
   };

-  stasis_log_safe_writes_state * sw = malloc(sizeof(*sw));
+  stasis_log_safe_writes_state * sw = stasis_malloc(1, stasis_log_safe_writes_state);
   sw->filename = strdup(filename);
   {
     char * log_scratch_filename = malloc(strlen(sw->filename) + 2);

@@ -852,7 +852,7 @@ stasis_log_t* stasis_log_safe_writes_open(const char * filename,
   sw->fileperm = fileperm;
   sw->softcommit = softcommit;

-  stasis_log_t* log = malloc(sizeof(*log));
+  stasis_log_t* log = stasis_malloc(1, stasis_log_t);
   memcpy(log,&proto, sizeof(proto));
   log->impl = sw;

@@ -212,7 +212,7 @@ int stasis_alloc_callback(int xid, void * arg) {
 }

 stasis_alloc_t* stasis_alloc_init(stasis_transaction_table_t * tbl, stasis_allocation_policy_t * allocPolicy) {
-  stasis_alloc_t * alloc = malloc(sizeof(*alloc));
+  stasis_alloc_t * alloc = stasis_malloc(1, stasis_alloc_t);
   alloc->lastFreepage = PAGEID_T_MAX;
   alloc->allocPolicy = allocPolicy;
   pthread_mutex_init(&alloc->mut, 0);

@@ -456,9 +456,9 @@ typedef struct {
 } lladd_linearHashNTA_generic_it;

 lladdIterator_t * ThashGenericIterator(int xid, recordid hash) {
-  lladdIterator_t * ret = malloc(sizeof(lladdIterator_t));
+  lladdIterator_t * ret = stasis_malloc(1, lladdIterator_t);
   ret->type = LINEAR_HASH_NTA_ITERATOR;
-  ret->impl = malloc(sizeof(lladd_linearHashNTA_generic_it));
+  ret->impl = stasis_malloc(1, lladd_linearHashNTA_generic_it);

   ((lladd_linearHashNTA_generic_it*)(ret->impl))->hit = ThashIterator(xid, hash, -1, -1);
   ((lladd_linearHashNTA_generic_it*)(ret->impl))->lastKey = NULL;

@@ -371,7 +371,7 @@ recordid TnaiveHashCreate(int xid, int keySize, int valSize) {
   assert(headerRidB);
   Page * p = loadPage(xid, rid.page);
   readlock(p->rwlatch,0);
-  recordid * check = malloc(stasis_record_type_to_size(stasis_record_dereference(xid, p, rid).size));
+  recordid * check = (recordid*)malloc(stasis_record_type_to_size(stasis_record_dereference(xid, p, rid).size));
   unlock(p->rwlatch);
   releasePage(p);
   rid.slot = 0;

@@ -432,7 +432,7 @@ int TnaiveHashDelete(int xid, recordid hashRid,
   recordid deleteMe;
   hashRid.slot = bucket_number;

-  hashEntry * bucket_contents = malloc(sizeof(hashEntry) + keySize + valSize);
+  hashEntry * bucket_contents = (hashEntry*)malloc(sizeof(hashEntry) + keySize + valSize);
   assert(hashRid.size == sizeof(hashEntry) + keySize + valSize);
   Tread(xid, hashRid, bucket_contents);
   hashRid.slot = 0;

@@ -448,7 +448,7 @@ int TnaiveHashDelete(int xid, recordid hashRid,
 }

 int TnaiveHashOpen(int xid, recordid hashRid, int keySize, int valSize) {
-  recordid * headerRidB = malloc(sizeof(recordid) + keySize + valSize);
+  recordid * headerRidB = (recordid*)malloc(sizeof(recordid) + keySize + valSize);
   hashRid.slot = 1;
   Tread(xid, hashRid, headerRidB);
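
The TnaiveHash hunks just above, like several TpagedList hunks below, do not switch to stasis_malloc at all; they only add an explicit cast to malloc's return value. The commit does not say why, but a plausible motivation is C++ compatibility: C++ rejects the implicit void*-to-typed-pointer conversion that C allows. An illustrative pair, with sz standing in for the full size expression from the diff:

recordid * check = malloc(sz);            /* valid C, an error under C++ */
recordid * check = (recordid*)malloc(sz); /* accepted by both languages */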
@ -47,7 +47,7 @@ int TpageSetRange(int xid, pageid_t page, int offset, const void * memAddr, int
|
|||
// XXX need to pack offset into front of log entry
|
||||
|
||||
Page * p = loadPage(xid, page);
|
||||
byte * logArg = malloc(sizeof(int) + 2 * len);
|
||||
byte * logArg = stasis_malloc(sizeof(int) + 2 * len, byte);
|
||||
|
||||
*(int*)logArg = offset;
|
||||
memcpy(logArg+sizeof(int), ((const byte*)memAddr), len);
|
||||
|
|
|
@ -60,7 +60,7 @@ int TpagedListInsert(int xid, recordid list, const byte * key, int keySize, cons
|
|||
DEBUG("Alloced rid: {%d %d %d}", rid.page, rid.slot, rid.size);
|
||||
}
|
||||
|
||||
pagedListEntry * dat = malloc(entrySize);
|
||||
pagedListEntry * dat = (pagedListEntry*)malloc(entrySize);
|
||||
|
||||
dat->keySize = keySize;
|
||||
dat->nextEntry = header.thisPage;
|
||||
|
@ -90,12 +90,12 @@ int TpagedListFind(int xid, recordid list, const byte * key, int keySize, byte *
|
|||
if(rid.slot) {
|
||||
rid.size = TrecordSize(xid, rid);
|
||||
pagedListEntry * dat;
|
||||
dat = malloc(rid.size);
|
||||
dat = (pagedListEntry*)malloc(rid.size);
|
||||
Tread(xid, rid, dat);
|
||||
|
||||
if(dat->keySize == keySize && !memcmp(dat+1, key, keySize)) {
|
||||
int valueSize = rid.size - keySize - sizeof(pagedListEntry);
|
||||
*value = malloc(valueSize);
|
||||
*value = stasis_malloc(valueSize, byte);
|
||||
memcpy(*value, ((byte*)(dat+1))+keySize, valueSize);
|
||||
free(dat);
|
||||
return valueSize;
|
||||
|
@ -125,7 +125,7 @@ int TpagedListRemove(int xid, recordid list, const byte * key, int keySize) {
|
|||
while(rid.slot || header.nextPage.size != -1) {
|
||||
if(rid.slot) {
|
||||
rid.size = TrecordSize(xid, rid);
|
||||
pagedListEntry * dat = malloc(rid.size);
|
||||
pagedListEntry * dat = (pagedListEntry*)malloc(rid.size);
|
||||
Tread(xid, rid, dat);
|
||||
|
||||
if(dat->keySize == keySize && !memcmp(dat+1, key, keySize)) {
|
||||
|
@ -134,7 +134,7 @@ int TpagedListRemove(int xid, recordid list, const byte * key, int keySize) {
|
|||
recordid lastRid = rid;
|
||||
lastRid.slot = lastSlot;
|
||||
lastRid.size = TrecordSize(xid, lastRid);
|
||||
pagedListEntry * lastRidBuf = malloc(lastRid.size);
|
||||
pagedListEntry * lastRidBuf = (pagedListEntry*)malloc(lastRid.size);
|
||||
Tread(xid, lastRid, lastRidBuf);
|
||||
lastRidBuf->nextEntry = dat->nextEntry;
|
||||
Tset(xid, lastRid, lastRidBuf);
|
||||
|
@ -183,7 +183,7 @@ lladd_pagedList_iterator * TpagedListIterator(int xid, recordid list) {
|
|||
assert(list.size == sizeof(pagedListHeader));
|
||||
Tread(xid, list, &header);
|
||||
|
||||
lladd_pagedList_iterator * it = malloc(sizeof(lladd_pagedList_iterator));
|
||||
lladd_pagedList_iterator * it = stasis_malloc(1, lladd_pagedList_iterator);
|
||||
|
||||
it->headerRid = header.nextPage;
|
||||
it->entryRid = list;
|
||||
|
@ -203,14 +203,14 @@ int TpagedListNext(int xid, lladd_pagedList_iterator * it,
|
|||
it->entryRid.size = TrecordSize(xid, it->entryRid);
|
||||
assert(it->entryRid.size != -1);
|
||||
|
||||
pagedListEntry * entry = malloc(it->entryRid.size);
|
||||
pagedListEntry * entry = (pagedListEntry*)malloc(it->entryRid.size);
|
||||
Tread(xid, it->entryRid, entry);
|
||||
|
||||
*keySize = entry->keySize;
|
||||
*valueSize = it->entryRid.size - *keySize - sizeof(pagedListEntry);
|
||||
|
||||
*key = malloc(*keySize);
|
||||
*value = malloc(*valueSize);
|
||||
*key = stasis_malloc(*keySize, byte);
|
||||
*value = stasis_malloc(*valueSize, byte);
|
||||
|
||||
memcpy(*key, entry+1, *keySize);
|
||||
memcpy(*value, ((byte*)(entry+1))+*keySize, *valueSize);
|
||||
|
|
|
@ -95,12 +95,12 @@ ssize_t Tpread(int xid, byte* buf, size_t count, off_t offset) {
|
|||
return read_write_helper(1, xid, -1, buf, count, offset);
|
||||
}
|
||||
ssize_t Tpwrite(int xid, const byte * buf, size_t count, off_t offset) {
|
||||
byte * buf2 = malloc(count);
|
||||
byte * buf2 = stasis_malloc(count, byte);
|
||||
|
||||
read_write_helper(1, xid, -1, buf2, count, offset);
|
||||
|
||||
size_t entrylen = sizeof(segment_file_arg_t) + 2*count;
|
||||
segment_file_arg_t * entry = malloc(entrylen);
|
||||
segment_file_arg_t * entry = (segment_file_arg_t*)malloc(entrylen);
|
||||
entry->offset = offset;
|
||||
memcpy((entry+1), buf, count);
|
||||
memcpy(((byte*)(entry+1))+count, buf2, count);
|
||||
|
|
|
@ -112,7 +112,7 @@ Page * TsetWithPage(int xid, recordid rid, Page *p, const void * dat) {
|
|||
unlock(p->rwlatch);
|
||||
|
||||
size_t sz = sizeof(slotid_t) + sizeof(int64_t) + 2 * rid.size;
|
||||
byte * const buf = malloc(sz);
|
||||
byte * const buf = stasis_malloc(sz, byte);
|
||||
|
||||
byte * b = buf;
|
||||
*(slotid_t*) b = rid.slot; b += sizeof(slotid_t);
|
||||
|
@ -136,7 +136,7 @@ int Tset(int xid, recordid rid, const void * dat) {
|
|||
int TsetRaw(int xid, recordid rid, const void * dat) {
|
||||
rid.size = stasis_record_type_to_size(rid.size);
|
||||
size_t sz = sizeof(slotid_t) + sizeof(int64_t) + 2 * rid.size;
|
||||
byte * const buf = malloc(sz);
|
||||
byte * const buf = stasis_malloc(sz, byte);
|
||||
|
||||
byte * b = buf;
|
||||
*(slotid_t*) b = rid.slot; b += sizeof(slotid_t);
|
||||
|
@ -160,7 +160,7 @@ static int op_set_range(const LogEntry* e, Page* p) {
|
|||
rid.size = stasis_record_length_read(e->xid,p,rid);
|
||||
|
||||
byte * data = (byte*)(range + 1);
|
||||
byte * tmp = malloc(rid.size);
|
||||
byte * tmp = stasis_malloc(rid.size, byte);
|
||||
|
||||
stasis_record_read(e->xid, p, rid, tmp);
|
||||
|
||||
|
@ -182,7 +182,7 @@ static int op_set_range_inverse(const LogEntry* e, Page* p) {
|
|||
rid.size = stasis_record_length_read(e->xid,p,rid);
|
||||
|
||||
byte * data = (byte*)(range + 1) + diffLength;
|
||||
byte * tmp = malloc(rid.size);
|
||||
byte * tmp = stasis_malloc(rid.size, byte);
|
||||
|
||||
stasis_record_read(e->xid, p, rid, tmp);
|
||||
memcpy(tmp+range->offset, data, diffLength);
|
||||
|
@ -195,7 +195,7 @@ void TsetRange(int xid, recordid rid, int offset, int length, const void * dat)
|
|||
Page * p = loadPage(xid, rid.page);
|
||||
|
||||
/// XXX rewrite without malloc (use read_begin, read_done)
|
||||
set_range_t * range = malloc(sizeof(set_range_t) + 2 * length);
|
||||
set_range_t * range = stasis_malloc(2 * length, set_range_t);
|
||||
|
||||
range->offset = offset;
|
||||
range->slot = rid.slot;
|
||||
|
|
|
@ -381,10 +381,10 @@ block_t genericBlock = {
|
|||
};
|
||||
|
||||
block_t* stasis_block_first_default_impl(int xid, Page * p) {
|
||||
block_t* ret = malloc(sizeof(block_t));
|
||||
block_t* ret = stasis_malloc(1, block_t);
|
||||
*ret = genericBlock;
|
||||
genericBlockImpl impl = { p, NULLRID };
|
||||
ret->impl = malloc(sizeof(genericBlockImpl));
|
||||
ret->impl = stasis_malloc(1, genericBlockImpl);
|
||||
*(genericBlockImpl*)(ret->impl) = impl;
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -46,7 +46,7 @@ static void phPrefetchRange(stasis_page_handle_t *ph, pageid_t pageid, pageid_t
|
|||
lsn_t off = pageid * PAGE_SIZE;
|
||||
lsn_t len = count * PAGE_SIZE;
|
||||
|
||||
byte * buf = malloc(len);
|
||||
byte * buf = stasis_malloc(len, byte);
|
||||
|
||||
((stasis_handle_t*)ph->impl)->read(ph->impl, off, buf, len);
|
||||
|
||||
|
@ -81,7 +81,7 @@ static void phClose(stasis_page_handle_t * ph) {
|
|||
free(ph);
|
||||
}
|
||||
static stasis_page_handle_t * phDup(stasis_page_handle_t * ph, int is_sequential) {
|
||||
stasis_page_handle_t * ret = malloc(sizeof(*ret));
|
||||
stasis_page_handle_t * ret = stasis_malloc(1, stasis_page_handle_t);
|
||||
memcpy(ret, ph, sizeof(*ret));
|
||||
ret->impl = ((stasis_handle_t*)ret->impl)->dup(ret->impl);
|
||||
if(((stasis_handle_t*)ret->impl)->error != 0) {
|
||||
|
@ -97,7 +97,7 @@ static stasis_page_handle_t * phDup(stasis_page_handle_t * ph, int is_sequential
|
|||
stasis_page_handle_t * stasis_page_handle_open(stasis_handle_t * handle,
|
||||
stasis_log_t * log, stasis_dirty_page_table_t * dpt) {
|
||||
DEBUG("Using pageHandle implementation\n");
|
||||
stasis_page_handle_t * ret = malloc(sizeof(*ret));
|
||||
stasis_page_handle_t * ret = stasis_malloc(1, stasis_page_handle_t);
|
||||
ret->write = phWrite;
|
||||
ret->read = phRead;
|
||||
ret->prefetch_range = phPrefetchRange;
|
||||
|
|
|
@ -67,7 +67,7 @@ static void stasis_recovery_analysis(stasis_log_t* log, stasis_transaction_table
|
|||
*/
|
||||
|
||||
if(xactLSN == NULL) {
|
||||
xactLSN = malloc(sizeof(lsn_t));
|
||||
xactLSN = stasis_malloc(1, lsn_t);
|
||||
lhinsert(transactionLSN, &(e->xid), sizeof(int), xactLSN);
|
||||
} else {
|
||||
/* We've seen this xact before, and must have put a value in
|
||||
|
|
|
@ -115,8 +115,8 @@ static void clockInsert (struct replacementPolicy* impl, Page* page) {
|
|||
}
|
||||
|
||||
replacementPolicy* replacementPolicyClockInit(Page * pageArray, int page_count) {
|
||||
replacementPolicy *ret = malloc(sizeof(*ret));
|
||||
stasis_replacement_policy_clock_t * clock = malloc(sizeof(*clock));
|
||||
replacementPolicy *ret = stasis_malloc(1, replacementPolicy);
|
||||
stasis_replacement_policy_clock_t * clock = stasis_malloc(1, stasis_replacement_policy_clock_t);
|
||||
clock->pages = pageArray;
|
||||
clock->page_count = page_count;
|
||||
clock->ptr = 0;
|
||||
|
|
|
@ -144,8 +144,8 @@ static void cwInsert (struct replacementPolicy* impl, Page* page) {
|
|||
}
|
||||
|
||||
replacementPolicy* replacementPolicyConcurrentWrapperInit(replacementPolicy** rp, int count) {
|
||||
replacementPolicy *ret = malloc(sizeof(*ret));
|
||||
stasis_replacement_policy_concurrent_wrapper_t * rpw = malloc(sizeof(*rpw));
|
||||
replacementPolicy *ret = stasis_malloc(1, replacementPolicy);
|
||||
stasis_replacement_policy_concurrent_wrapper_t * rpw = stasis_malloc(1, stasis_replacement_policy_concurrent_wrapper_t);
|
||||
|
||||
if(stasis_replacement_policy_concurrent_wrapper_power_of_two_buckets) {
|
||||
// ensure that count is a power of two.
|
||||
|
@ -154,8 +154,8 @@ replacementPolicy* replacementPolicyConcurrentWrapperInit(replacementPolicy** rp
|
|||
count = 1; bits --;
|
||||
while(bits > 0) { count *= 2; bits--; }
|
||||
}
|
||||
rpw->mut = malloc(sizeof(rpw->mut[0]) * count);
|
||||
rpw->impl = malloc(sizeof(rpw->impl[0]) * count);
|
||||
rpw->mut = stasis_malloc(count, pthread_mutex_t);
|
||||
rpw->impl = stasis_malloc(count, replacementPolicy*);
|
||||
for(int i = 0; i < count; i++) {
|
||||
pthread_mutex_init(&rpw->mut[i],0);
|
||||
rpw->impl[i] = rp[i];
|
||||
|
|
|
@ -77,7 +77,7 @@ static Page* stasis_replacement_policy_lru_get_stale_and_remove(replacementPolic
|
|||
|
||||
static void stasis_replacement_policy_lru_insert(replacementPolicy* r, Page* p) {
|
||||
stasis_replacement_policy_lru_t * l = r->impl;
|
||||
stasis_replacement_policy_lru_entry * e = malloc(sizeof(stasis_replacement_policy_lru_entry));
|
||||
stasis_replacement_policy_lru_entry * e = stasis_malloc(1, stasis_replacement_policy_lru_entry);
|
||||
e->value = p;
|
||||
e->clock = l->now;
|
||||
l->now++;
|
||||
|
@ -87,8 +87,8 @@ static void stasis_replacement_policy_lru_insert(replacementPolicy* r, Page* p)
|
|||
}
|
||||
|
||||
replacementPolicy * stasis_replacement_policy_lru_init() {
|
||||
replacementPolicy * ret = malloc(sizeof(replacementPolicy));
|
||||
stasis_replacement_policy_lru_t * l = malloc(sizeof(stasis_replacement_policy_lru_t));
|
||||
replacementPolicy * ret = stasis_malloc(1, replacementPolicy);
|
||||
stasis_replacement_policy_lru_t * l = stasis_malloc(1, stasis_replacement_policy_lru_t);
|
||||
l->now = 0;
|
||||
l->hash = LH_ENTRY(create)(10);
|
||||
l->lru = RB_ENTRY(init)(stasis_replacement_policy_lru_entry_cmp, 0);
|
||||
|
|
|
@ -110,14 +110,14 @@ static void stasis_lru_fast_deinit(struct replacementPolicy * r) {
|
|||
free(r);
|
||||
}
|
||||
replacementPolicy * lruFastInit() {
|
||||
struct replacementPolicy * ret = malloc(sizeof(struct replacementPolicy));
|
||||
struct replacementPolicy * ret = stasis_malloc(1, struct replacementPolicy);
|
||||
ret->deinit = stasis_lru_fast_deinit;
|
||||
ret->hit = stasis_lru_fast_hit;
|
||||
ret->getStale = stasis_lru_fast_getStale;
|
||||
ret->remove = stasis_lru_fast_remove;
|
||||
ret->getStaleAndRemove = stasis_lru_fast_getStaleAndRemove;
|
||||
ret->insert = stasis_lru_fast_insert;
|
||||
lruFast * l = malloc(sizeof(lruFast));
|
||||
lruFast * l = stasis_malloc(1, lruFast);
|
||||
llInit(&l->list);
|
||||
ret->impl = l;
|
||||
return ret;
|
||||
|
|
|
@ -52,8 +52,8 @@ static void tsInsert (struct replacementPolicy* impl, Page* page) {
|
|||
}
|
||||
|
||||
replacementPolicy* replacementPolicyThreadsafeWrapperInit(replacementPolicy* rp) {
|
||||
replacementPolicy *ret = malloc(sizeof(*ret));
|
||||
stasis_replacement_policy_threadsafe_wrapper_t * rpw = malloc(sizeof(*rpw));
|
||||
replacementPolicy *ret = stasis_malloc(1, replacementPolicy);
|
||||
stasis_replacement_policy_threadsafe_wrapper_t * rpw = stasis_malloc(1, stasis_replacement_policy_threadsafe_wrapper_t);
|
||||
rpw->impl = rp;
|
||||
pthread_mutex_init(&rpw->mut,0);
|
||||
ret->init = NULL;
|
||||
|
|
|
@ -172,7 +172,7 @@ int stasis_transaction_table_set_argument(stasis_transaction_table_t *tbl, int x
|
|||
}
|
||||
|
||||
int* stasis_transaction_table_list_active(stasis_transaction_table_t *tbl, int *count) {
|
||||
int * ret = malloc(sizeof(*ret));
|
||||
int * ret = stasis_malloc(1, int);
|
||||
ret[0] = INVALID_XID;
|
||||
*count = 0;
|
||||
for(int i = 0; i < MAX_TRANSACTIONS; i++) {
|
||||
|
@ -188,7 +188,7 @@ int* stasis_transaction_table_list_active(stasis_transaction_table_t *tbl, int *
|
|||
}
|
||||
|
||||
stasis_transaction_table_t * stasis_transaction_table_init() {
|
||||
stasis_transaction_table_t * tbl = malloc(sizeof(*tbl));
|
||||
stasis_transaction_table_t * tbl = stasis_malloc(1, stasis_transaction_table_t);
|
||||
tbl->active_count = 0;
|
||||
|
||||
#ifndef HAVE_GCC_ATOMICS
|
||||
|
@ -307,7 +307,7 @@ stasis_transaction_table_entry_t * stasis_transaction_table_begin(stasis_transac
|
|||
struct stasis_transaction_table_thread_local_state_t * tls = pthread_getspecific(tbl->key);
|
||||
|
||||
if(tls == NULL) {
|
||||
tls = malloc(sizeof(*tls));
|
||||
tls = stasis_malloc(1, struct stasis_transaction_table_thread_local_state_t);
|
||||
tls->last_entry = 0;
|
||||
tls->num_entries = 0;
|
||||
tls->entries = NULL;
|
||||
|
|
|
@ -27,7 +27,7 @@ struct stasis_truncation_t {
|
|||
#endif
|
||||
stasis_truncation_t * stasis_truncation_init(stasis_dirty_page_table_t * dpt, stasis_transaction_table_t * tbl,
|
||||
stasis_buffer_manager_t *buffer_manager, stasis_log_t *log) {
|
||||
stasis_truncation_t * ret = malloc(sizeof(*ret));
|
||||
stasis_truncation_t * ret = stasis_malloc(1, stasis_truncation_t);
|
||||
ret->initialized = 1;
|
||||
ret->automaticallyTruncating = 0;
|
||||
pthread_mutex_init(&ret->shutdown_mutex, 0);
|
||||
|
|
|
@ -78,7 +78,7 @@ stasis_bloom_filter_t * stasis_bloom_filter_create(uint64_t(*func_a)(const char*
|
|||
uint64_t(*func_b)(const char*,int),
|
||||
uint64_t num_expected_items,
|
||||
double false_positive_rate) {
|
||||
stasis_bloom_filter_t * ret = malloc(sizeof(*ret));
|
||||
stasis_bloom_filter_t * ret = stasis_malloc(1, stasis_bloom_filter_t);
|
||||
ret->func_a = func_a;
|
||||
ret->func_b = func_b;
|
||||
ret->num_expected_items = num_expected_items;
|
||||
|
@ -86,7 +86,7 @@ stasis_bloom_filter_t * stasis_bloom_filter_create(uint64_t(*func_a)(const char*
|
|||
ret->num_buckets = stasis_bloom_filter_calc_num_buckets(ret->num_expected_items, ret->desired_false_positive_rate);
|
||||
ret->buckets = calloc((ret->num_buckets / 8) + ((ret->num_buckets % 8 == 0) ? 0 : 1), 1);
|
||||
ret->num_functions = stasis_bloom_filter_calc_num_functions(ret->num_expected_items, ret->num_buckets);
|
||||
ret->result_scratch_space = malloc(sizeof(*ret->result_scratch_space) * ret->num_functions);
|
||||
ret->result_scratch_space = stasis_malloc(ret->num_functions, uint64_t);
|
||||
ret->actual_number_of_items = 0;
|
||||
return ret;
|
||||
}
|
||||
|
@ -136,7 +136,7 @@ void stasis_bloom_filter_insert(stasis_bloom_filter_t * bf, const char *key, int
|
|||
}
|
||||
int stasis_bloom_filter_lookup(stasis_bloom_filter_t * bf, const char * key, int len) {
|
||||
int ret = 1;
|
||||
uint64_t * scratch = malloc(sizeof(*scratch) * bf->num_functions);
|
||||
uint64_t * scratch = stasis_malloc(bf->num_functions, uint64_t);
|
||||
stasis_bloom_filter_calc_functions(bf, scratch, key, len);
|
||||
for(int i = 0; i < bf->num_functions; i++) {
|
||||
ret = ret && stasis_bloom_filter_get_bit(bf, scratch[i]);
|
||||
|
|
|
@ -422,7 +422,7 @@ hashtable_t * hashtable_init(pageid_t size) {
|
|||
size /= 2;
|
||||
newsize *= 2;
|
||||
}
|
||||
hashtable_t *ht = malloc(sizeof(*ht));
|
||||
hashtable_t *ht = stasis_malloc(1, hashtable_t);
|
||||
|
||||
ht->maxbucketid = (newsize) - 1;
|
||||
ht->buckets = calloc(ht->maxbucketid+1, sizeof(bucket_t));
|
||||
|
|
|
@ -15,15 +15,15 @@ static inline struct LL_ENTRY(node_t)* LL_ENTRY(shiftNode) (struct LL_ENTRY(list
|
|||
static inline void LL_ENTRY(removeNode)(list * l, node_t * n);
|
||||
|
||||
list * LL_ENTRY(create)(node_t*(*getNode)(value_t*v,void*conf), void(*setNode)(value_t*v,node_t*n,void*conf),void*conf) {
|
||||
list* ret = malloc(sizeof(list));
|
||||
list* ret = stasis_malloc(1, list);
|
||||
|
||||
// bypass const annotation on head, tail...
|
||||
list tmp = {
|
||||
getNode,
|
||||
setNode,
|
||||
conf,
|
||||
malloc(sizeof(node_t)),
|
||||
malloc(sizeof(node_t))
|
||||
stasis_malloc(1,node_t),
|
||||
stasis_malloc(1,node_t)
|
||||
};
|
||||
memcpy(ret, &tmp, sizeof(list));
|
||||
|
||||
|
@ -44,7 +44,7 @@ void LL_ENTRY(destroy)(list* l) {
|
|||
free(l);
|
||||
}
|
||||
int LL_ENTRY(push)(list* l, value_t * v) {
|
||||
node_t * n = malloc(sizeof(node_t));
|
||||
node_t * n = stasis_malloc(1, node_t);
|
||||
if(!n) { return ENOMEM; }
|
||||
n->v = v;
|
||||
assert(l->getNode(v, l->conf) == 0);
|
||||
|
@ -65,7 +65,7 @@ value_t* LL_ENTRY(pop) (list* l) {
|
|||
}
|
||||
}
|
||||
int LL_ENTRY(unshift)(list* l, value_t * v) {
|
||||
node_t * n = malloc(sizeof(node_t));
|
||||
node_t * n = stasis_malloc(1, node_t);
|
||||
if(!n) { return ENOMEM; }
|
||||
n->v = v;
|
||||
assert(l->getNode(v, l->conf) == 0);
|
||||
|
|
|
@@ -129,14 +129,14 @@ static struct LH_ENTRY(pair_t)* insertIntoLinkedList(struct LH_ENTRY(table) * t
 assert(table->bucketList[bucket].next == 0);

 thePair = &(table->bucketList[bucket]);
-thePair->key = malloc(len);
+thePair->key = (const LH_ENTRY(key_t) *)malloc(len);
 thePair->keyLength = len;
 memcpy(((void*)thePair->key), key, len);
 thePair->value = value;
 } else {
 // the bucket isn't empty.
-thePair = malloc(sizeof(struct LH_ENTRY(pair_t)));
-thePair->key = malloc(len);
+thePair = stasis_malloc(1, struct LH_ENTRY(pair_t));
+thePair->key = (const LH_ENTRY(key_t) *)malloc(len);
 memcpy((void*)thePair->key, key, len);
 thePair->keyLength = len;
 thePair->value = value;
@@ -220,7 +220,7 @@ static void extendHashTable(struct LH_ENTRY(table) * table) {


 struct LH_ENTRY(table) * LH_ENTRY(create)(int initialSize) {
-struct LH_ENTRY(table) * ret = malloc(sizeof(struct LH_ENTRY(table)));
+struct LH_ENTRY(table) * ret = stasis_malloc(1, struct LH_ENTRY(table));
 ret->bucketList = calloc(initialSize, sizeof(struct LH_ENTRY(pair_t)));
 HASH_ENTRY(_get_size_params)(initialSize,
 &(ret->bucketListBits),
@@ -466,7 +466,7 @@ void * pblHtFirst ( pblHashTable_t * h ) {
 if(pblLists == 0) {
 pblLists = LH_ENTRY(create)(10);
 }
-struct LH_ENTRY(list) *list = malloc(sizeof(struct LH_ENTRY(list)));
+struct LH_ENTRY(list) *list = stasis_malloc(1, struct LH_ENTRY(list));
 struct LH_ENTRY(list) * oldList;

 if((oldList = LH_ENTRY(insert)(pblLists,
@@ -56,15 +56,15 @@ void printList(LinkedList **l) {
 printf (".\n");
 }
 void addVal(LinkedList **list, long val) {
-LinkedList * new = (LinkedList *)malloc(sizeof(LinkedList));
-new->val = val;
-new->next = NULL;
+LinkedList * node = stasis_malloc(1, LinkedList);
+node->val = val;
+node->next = NULL;
 if (*list==NULL) {
-*list = new;
+*list = node;
 }
 else {
-new->next = *list;
-*list = new;
+node->next = *list;
+*list = node;
 }
 }
 void removeVal(LinkedList **list, long val) {
@@ -105,12 +105,12 @@ long popMaxVal(LinkedList **list) {
 void addSortedVal(LinkedList **list, long val) {
 LinkedList * tmp;
 LinkedList * tmpprev;
-LinkedList * new = malloc(sizeof(LinkedList));
-new->val = val;
+LinkedList * node = stasis_malloc(1, LinkedList);
+node->val = val;
 /*see if new entry should come in the beginning*/
 if ((*list==NULL) || ((*list)->val<val)) {
-new->next = *list;
-*list = new;
+node->next = *list;
+*list = node;
 return;
 }
 /*else determine where to put new entry*/
@@ -118,16 +118,16 @@ void addSortedVal(LinkedList **list, long val) {
 tmpprev = *list;
 while (tmp!=NULL) {
 if (tmp->val<val) {
-tmpprev->next = new;
-new->next = tmp;
+tmpprev->next = node;
+node->next = tmp;
 return;
 }
 tmpprev = tmp;
 tmp = tmp->next;
 }
 /*if gotten here, tmp is null so put item at the end of the list*/
-new->next = NULL;
-tmpprev->next = new;
+node->next = NULL;
+tmpprev->next = node;
 }
 /*
 return 1 if val is in the list, 0 otherwise
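Beyond the malloc change, the hunks above rename the local variable new to node. Presumably this is groundwork for building the file under a C++ compiler, where new is a reserved keyword:

/* Illustration of why the rename matters: */
LinkedList * new = stasis_malloc(1, LinkedList);   /* valid C; syntax error in C++ */
LinkedList * node = stasis_malloc(1, LinkedList);  /* valid in both languages */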
@@ -24,7 +24,7 @@ static void free_key(void * key) {
 }

 stasis_aggregate_min_t * stasis_aggregate_min_init(int large) {
-stasis_aggregate_min_t * ret = malloc(sizeof(*ret));
+stasis_aggregate_min_t * ret = stasis_malloc(1, stasis_aggregate_min_t);
 if(large) {
 ret->tree = rbinit(cmp_lsn_t,0);
 } else {
@@ -58,7 +58,7 @@ void stasis_aggregate_min_add(stasis_aggregate_min_t * min, lsn_t * a) {
 }
 lsn_t * p = pthread_getspecific(min->key);
 if(!p) {
-p = malloc(sizeof(lsn_t));
+p = stasis_malloc(1, lsn_t);
 *p = -1;
 pthread_setspecific(min->key, p);
 }
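The hunk above is one instance of a lazy thread-local initialization idiom that recurs below (skiplist, hazard pointers, histogram). A self-contained sketch of the idiom, with the key-creation step and the lsn_t stand-in assumed:

#include <pthread.h>
#include <stdlib.h>

typedef long long lsn_t;   /* stand-in; the real typedef lives in Stasis headers */
static pthread_key_t key;  /* assume pthread_key_create(&key, free) ran once at startup */

static lsn_t * get_thread_local_lsn(void) {
  lsn_t * p = pthread_getspecific(key);
  if(!p) {                         /* first use on this thread */
    p = malloc(sizeof(lsn_t));     /* stasis_malloc(1, lsn_t) in the patched code */
    *p = -1;                       /* sentinel: no value recorded yet */
    pthread_setspecific(key, p);   /* the key's destructor frees it at thread exit */
  }
  return p;
}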
@@ -8,8 +8,8 @@ struct stasis_util_multiset_t {
 };

 stasis_util_multiset_t * stasis_util_multiset_create() {
-stasis_util_multiset_t * set = malloc(sizeof(*set));
-set->items = malloc(sizeof(lsn_t));
+stasis_util_multiset_t * set = stasis_malloc(1, stasis_util_multiset_t);
+set->items = stasis_malloc(1, lsn_t);
 set->item_count = 0;
 return set;
 }
@@ -144,9 +144,8 @@ RB_STATIC struct RB_ENTRY(tree) *RB_ENTRY(init)(void)
 warned = 1;
 }
 struct RB_ENTRY(tree) *retval;
-char c;

-c=rcsid[0]; /* This does nothing but shutup the -Wall */
+(void)rcsid; /* This does nothing but shutup the -Wall */

 if ((retval=(struct RB_ENTRY(tree) *) malloc(sizeof(struct RB_ENTRY(tree))))==NULL)
 return(NULL);
@@ -452,6 +451,8 @@ RB_ENTRY(_lookup)(int mode, const RB_ENTRY(data_t) *key, struct RB_ENTRY(tree) *
 int cmp=0;
 int found=0;
+int first = 1;
+(void)first; /* Silence compiler warning */

 y=RBNULL; /* points to the parent of x */
 x=rbinfo->rb_root;
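Both rbtree hunks above replace a dummy read of rcsid through a scratch char with a direct (void) cast; both are idioms for marking a variable as deliberately unused so -Wall stays quiet. A minimal illustration (the rcsid initializer is assumed):

static const char rcsid[] = "$Id$";

static void silence_unused_warnings(void) {
  char c = rcsid[0];  /* old idiom: reads a byte just to "use" rcsid... */
  (void)c;            /* ...and then c itself needs silencing */
  (void)rcsid;        /* new idiom: one line, no temporary, no generated code */
}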
@@ -272,7 +272,7 @@ stasis_ringbuffer_t * stasis_ringbuffer_init(intptr_t base, lsn_t initial_offset
 return 0;
 }

-stasis_ringbuffer_t * ring = malloc(sizeof(*ring));
+stasis_ringbuffer_t * ring = stasis_malloc(1, stasis_ringbuffer_t);

 // Allocate the memory region using mmap black magic.
@@ -25,12 +25,12 @@ struct stasis_util_slab_t {
 * @return a slab allocator. Calling stasis_util_slab_destroy() will deallocate it all-at-once.
 */
 stasis_util_slab_t * stasis_util_slab_create(uint32_t obj_sz, uint32_t block_sz) {
-stasis_util_slab_t* ret = malloc(sizeof(*ret));
+stasis_util_slab_t* ret = stasis_malloc(1, stasis_util_slab_t);

 // printf("slab init: obj siz = %lld, block_sz = %lld\n", (long long)obj_sz, (long long)block_sz);

-ret->blocks = malloc(sizeof(ret->blocks[0]));
-ret->blocks[0] = malloc(block_sz);
+ret->blocks = stasis_malloc(1, byte*);
+ret->blocks[0] = stasis_malloc(block_sz, byte);
 ret->freelist_ptr = 0;

 ret->obj_sz = obj_sz;
@@ -72,7 +72,7 @@ void* stasis_util_slab_malloc(stasis_util_slab_t * slab) {
 (slab->this_block) ++;
 slab->blocks = realloc(slab->blocks, (slab->this_block+1) * sizeof(slab->blocks[0]));
 assert(slab->blocks);
-slab->blocks[slab->this_block] = malloc(slab->block_sz);
+slab->blocks[slab->this_block] = stasis_malloc(slab->block_sz, byte);
 assert(slab->blocks[slab->this_block]);
 }
 slab->this_block_count ++;
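A hypothetical usage sketch for the slab interface touched above, built from the functions this diff names (stasis_util_slab_create, stasis_util_slab_malloc, and the stasis_util_slab_destroy mentioned in the doc comment); the payload type is an assumption:

typedef struct { int id; double weight; } my_obj_t;  /* illustrative payload */

stasis_util_slab_t * slab = stasis_util_slab_create(sizeof(my_obj_t), 4096);
my_obj_t * o = stasis_util_slab_malloc(slab);  /* carved from the current block */
o->id = 1; o->weight = 2.0;
/* ... allocate many more objects ... */
stasis_util_slab_destroy(slab);  /* frees every block at once; no per-object free */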
@@ -179,4 +179,6 @@ extern long long *stasis_dbug_timestamp;

 #include <pthread.h>

+#include <stasis/util/malloc.h>
+
 #endif /* __stasis_common_h */
@@ -28,7 +28,7 @@ namespace rose {

 plugin_id_t pluginid = plugin_id<FORMAT, COMPRESSOR, typename COMPRESSOR::TYP>();

-plugin_id_t * plugins = (plugin_id_t*)malloc(column_count * sizeof(plugin_id_t));
+plugin_id_t * plugins = stasis_malloc(column_count, plugin_id_t);
 for(column_number_t c = 0; c < column_count; c++) {
 plugins[c] = pluginid;
 }
@@ -631,8 +631,7 @@ namespace rose {
 gettimeofday(&stop_tv,0);
 stop = tv_to_double(stop_tv);

-typeof(h->scratch_tree)* tmp_ptr
-= (typeof(h->scratch_tree)*) malloc(sizeof(void*));
+typeof(h->scratch_tree)* tmp_ptr = (typeof(h->scratch_tree)*) malloc(sizeof(void*));
 *tmp_ptr = h->scratch_tree;
 *(h->input_handle) = tmp_ptr;

@@ -857,7 +856,7 @@ namespace rose {
 LSM_ITER* c1p = new LSM_ITER(*h->args1->in_tree ? **h->args1->in_tree : 0 , val);
 LSM_ITER* c2 = new LSM_ITER( h->args1->my_tree , val);

-void ** ret = (void**)malloc(10 * sizeof(void*));
+void ** ret = stasis_malloc(10, void*);

 ret[0] = c0;
 ret[1] = c0p;
@@ -128,7 +128,7 @@ lladd_hash_iterator * ThashIterator(int xid, recordid hash, int keySize, int val
 @param it The iterator that will be traversed. @see ThashIterator().
 @param key a pointer to an uninitialized pointer value. If another entry is
 encountered, then the uninitialized pointer value will be set to point
-to a malloc()'ed region of memory that contains the value's key. This
+to a malloc'ed region of memory that contains the value's key. This
 region of memory should be manually free()'ed by the application. LLADD
 normally leaves memory management to the application. However, once
 hashes with variable size entries are supported, it would be extremely
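A hedged usage sketch of the iterator contract the comment above describes. ThashNext's exact signature is an assumption from context; the ownership rule (the library allocates, the caller frees) is stated in the comment itself:

byte * key; byte * value;   /* intentionally uninitialized; the iterator fills them */
int keySize, valueSize;
lladd_hash_iterator * it = ThashIterator(xid, hash, sizeof(int), sizeof(recordid));
while(ThashNext(xid, it, &key, &keySize, &value, &valueSize)) {
  /* ... examine key/value ... */
  free(key);     /* caller-owned per the documentation above */
  free(value);
}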
@@ -80,7 +80,7 @@ int stasis_util_skiplist_default_key_finalize(void * p, void * ignored) {
 static inline int stasis_util_skiplist_random_level(pthread_key_t k) {
 kiss_table_t * kiss = pthread_getspecific(k);
 if(kiss == 0) {
-kiss = malloc(sizeof(*kiss));
+kiss = stasis_malloc(1, kiss_table_t);
 stasis_util_random_kiss_settable(kiss,
 random(), random(), random(), random(), random(), random());
 pthread_setspecific(k, kiss);
@@ -97,7 +97,7 @@ static inline int stasis_util_skiplist_random_level(pthread_key_t k) {

 static inline hazard_ptr stasis_util_skiplist_make_node(int level, void * key) {
 stasis_skiplist_node_t * x
-= malloc(sizeof(*x)
+= (stasis_skiplist_node_t*)malloc(sizeof(*x)
 + (level) * (sizeof(hazard_ptr) + sizeof(pthread_mutex_t)));
 x->key = (hazard_ptr)key;
 x->level = level;
@@ -134,7 +134,7 @@ static inline int stasis_util_skiplist_cmp_helper2(
 static inline stasis_skiplist_t * stasis_util_skiplist_init(
 int (*cmp)(const void*, const void*),
 int (*finalize)(void *, void * nul)) {
-stasis_skiplist_t * list = malloc(sizeof(*list));
+stasis_skiplist_t * list = stasis_malloc(1, stasis_skiplist_t);
 list->levelCap = 32;
 list->h = hazard_init(STASIS_SKIPLIST_HP_COUNT+list->levelCap,
 STASIS_SKIPLIST_HP_COUNT, 250, stasis_util_skiplist_node_finalize, list);
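Note that make_node above gains only a cast rather than a stasis_malloc conversion. Presumably that is because it over-allocates a variable-length tail behind the node header, which a (count, type) interface cannot express:

/* One node header plus `level` trailing (hazard_ptr, mutex) slots in a
 * single allocation; stasis_malloc(n, T) can only say "n copies of T",
 * not "one T plus k extra bytes". The added cast matches the commit's
 * C++-friendliness elsewhere. */
stasis_skiplist_node_t * x = (stasis_skiplist_node_t*)malloc(
    sizeof(*x) + (level) * (sizeof(hazard_ptr) + sizeof(pthread_mutex_t)));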
@@ -127,7 +127,7 @@ static void hazard_deinit_thread(void * p) {
 */
 static inline hazard_t* hazard_init(int hp_slots, int stack_start, int r_slots,
 int (*finalizer)(void*, void*), void * conf) {
-hazard_t * ret = malloc(sizeof(hazard_t));
+hazard_t * ret = stasis_malloc(1, hazard_t);
 pthread_key_create(&ret->hp, hazard_deinit_thread);
 ret->num_slots = hp_slots;
 ret->stack_start = stack_start;
@@ -142,7 +142,7 @@ static inline hazard_t* hazard_init(int hp_slots, int stack_start, int r_slots,
 static inline hazard_ptr_rec_t * hazard_ensure_tls(hazard_t * h) {
 hazard_ptr_rec_t * rec = pthread_getspecific(h->hp);
 if(rec == NULL) {
-rec = malloc(sizeof(hazard_ptr_rec_t));
+rec = stasis_malloc(1, hazard_ptr_rec_t);
 rec->hp = calloc(h->num_slots, sizeof(hazard_ptr));
 rec->rlist = calloc(h->num_r_slots, sizeof(hazard_ptr));
 rec->rlist_len = 0;
@@ -66,7 +66,7 @@ static inline void stasis_histogram_insert_log_timeval(stasis_histogram_64_t* hi

 static inline void stasis_histogram_tick(stasis_histogram_64_t* hist) {
 struct timeval * val = pthread_getspecific(hist->tls);
-if(!val) { val = malloc(sizeof(*val)); pthread_setspecific(hist->tls, val); }
+if(!val) { val = stasis_malloc(1, struct timeval); pthread_setspecific(hist->tls, val); }
 gettimeofday(val,0);
 }
 static inline void stasis_histogram_tock(stasis_histogram_64_t* hist) {
@@ -33,7 +33,7 @@ BEGIN_C_DECLS
 typedef pthread_rwlock_t rwl;

 static inline rwl* initlock(void) {
-rwl* ret = (rwl*)malloc(sizeof(*ret));
+rwl* ret = stasis_malloc(1, rwl);
 int err = pthread_rwlock_init(ret, 0);
 if(err) { perror("couldn't init rwlock"); abort(); }
 DEBUG("initlock(%llx)\n", (long long)ret);
@@ -119,7 +119,7 @@ typedef struct rwlc {
 } rwlc;

 static inline rwlc* rwlc_initlock(void) {
-rwlc* ret = (rwlc*)malloc(sizeof(*ret));
+rwlc* ret = stasis_malloc(1, rwlc);
 ret->rw = initlock();
 int err = pthread_mutex_init(&ret->mut, 0);
 ret->is_writelocked = 0;