Move the umem_startup constructor into the umem .so itself, rather than
the malloc replacement.

Rename ec_atomic to umem_atomic.

Add a posix_memalign function.

-ldl isn't universal; make a configure check for it.
commit b9dc821378 (parent 49b7cfb4d4)
Wez Furlong, 2010-06-26 13:10:39 +00:00
5 changed files with 43 additions and 25 deletions

Makefile.am:

@@ -1,7 +1,7 @@
 lib_LTLIBRARIES = libumem.la libumem_malloc.la
 noinst_PROGRAMS = umem_test umem_test2 umem_test3
-libumem_la_LDFLAGS = -lpthread -ldl
+libumem_la_LDFLAGS = -lpthread
 umem_test_SOURCES = umem_test.c
 umem_test_LDADD = -lumem
@@ -36,7 +36,7 @@ libumem_la_SOURCES = init_lib.c \
 	sys/vmem_impl_user.h
 libumem_malloc_la_SOURCES = malloc.c
-libumem_malloc_la_LDFLAGS = -lpthread -ldl -R$(libdir) -lumem
+libumem_malloc_la_LDFLAGS = -lpthread -R$(libdir) -lumem
 man3_MANS = umem_alloc.3 umem_cache_create.3 umem_debug.3
 EXTRA_DIST = COPYRIGHT OPENSOLARIS.LICENSE umem.spec Doxyfile umem_test4 \

configure.ac:

@@ -21,6 +21,7 @@ int main(void){return (sizeof(pthread_mutex_t) > 24);}
 ]
 )
+AC_CHECK_LIB(dl,dlopen)
 AC_CHECK_HEADERS([sys/mman.h sys/sysmacros.h sys/time.h malloc.h])
 AC_CHECK_FUNCS([issetugid mallinfo malloc_stats])
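
When the library is found, AC_CHECK_LIB(dl,dlopen) defines HAVE_LIBDL and prepends -ldl to LIBS automatically, which is why the hard-coded -ldl can be dropped from the link flags in Makefile.am above.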

malloc.c:

@@ -226,6 +226,16 @@ memalign(size_t align, size_t size_arg)
 	return ((void *)ret);
 }
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+	*memptr = memalign(alignment, size);
+	if (*memptr) {
+		return 0;
+	}
+	return errno;
+}
 void *
 valloc(size_t size)
 {
@@ -397,6 +407,11 @@ realloc(void *buf_arg, size_t newsize)
 	if (buf_arg == NULL)
 		return (malloc(newsize));
+	if (newsize == 0) {
+		free(buf_arg);
+		return (NULL);
+	}
 	/*
 	 * get the old data size without freeing the buffer
 	 */
@@ -417,9 +432,4 @@ realloc(void *buf_arg, size_t newsize)
 	return (buf);
 }
-void __attribute__((constructor))
-__malloc_umem_init (void)
-{
-	umem_startup(NULL, 0, 0, NULL, NULL);
-}
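
Besides the new posix_memalign entry point, this file also gains the conventional free-and-return-NULL behavior for realloc(ptr, 0) and loses the constructor, which now lives in umem.c. Note that posix_memalign reports failure through its return value rather than through errno; a minimal caller sketch (illustrative only, not part of the commit):

#include <stdlib.h>
#include <string.h>

int main(void)
{
	void *buf = NULL;
	/* alignment must be a power of two multiple of sizeof(void *) */
	int err = posix_memalign(&buf, 64, 4096);
	if (err != 0)
		return 1;	/* the error code is the return value; errno is not inspected */
	memset(buf, 0, 4096);
	free(buf);
	return 0;
}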

sol_compat.h:

@@ -109,19 +109,19 @@ static INLINE int thr_create(void *stack_base,
 #endif
 #ifdef ECELERITY
-# include "ec_atomic.h"
+# include "umem_atomic.h"
 #else
 # ifdef _WIN32
-# define ec_atomic_inc(a) InterlockedIncrement(a)
-# define ec_atomic_inc64(a) InterlockedIncrement64(a)
+# define umem_atomic_inc(a) InterlockedIncrement(a)
+# define umem_atomic_inc64(a) InterlockedIncrement64(a)
 # elif defined(__MACH__)
 # include <libkern/OSAtomic.h>
-# define ec_atomic_inc(x) OSAtomicIncrement32Barrier((int32_t*)x)
+# define umem_atomic_inc(x) OSAtomicIncrement32Barrier((int32_t*)x)
 # if !defined(__ppc__)
-# define ec_atomic_inc64(x) OSAtomicIncrement64Barrier((int64_t*)x)
+# define umem_atomic_inc64(x) OSAtomicIncrement64Barrier((int64_t*)x)
 # endif
 # elif (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
-static INLINE uint_t ec_atomic_cas(uint_t *mem, uint_t with, uint_t cmp)
+static INLINE uint_t umem_atomic_cas(uint_t *mem, uint_t with, uint_t cmp)
 {
 	uint_t prev;
 	asm volatile ("lock; cmpxchgl %1, %2"
@@ -130,7 +130,7 @@ static INLINE uint_t ec_atomic_cas(uint_t *mem, uint_t with, uint_t cmp)
 		: "memory");
 	return prev;
 }
-static INLINE uint64_t ec_atomic_cas64(uint64_t *mem, uint64_t with,
+static INLINE uint64_t umem_atomic_cas64(uint64_t *mem, uint64_t with,
 	uint64_t cmp)
 {
 	uint64_t prev;
@@ -153,34 +153,34 @@ static INLINE uint64_t ec_atomic_cas64(uint64_t *mem, uint64_t with,
 # endif
 	return prev;
 }
-static INLINE uint64_t ec_atomic_inc64(uint64_t *mem)
+static INLINE uint64_t umem_atomic_inc64(uint64_t *mem)
 {
 	register uint64_t last;
 	do {
 		last = *mem;
-	} while (ec_atomic_cas64(mem, last+1, last) != last);
+	} while (umem_atomic_cas64(mem, last+1, last) != last);
 	return ++last;
 }
-# define ec_atomic_inc64 ec_atomic_inc64
+# define umem_atomic_inc64 umem_atomic_inc64
 # else
 # error no atomic solution for your platform
 # endif
-# ifndef ec_atomic_inc
-static INLINE uint_t ec_atomic_inc(uint_t *mem)
+# ifndef umem_atomic_inc
+static INLINE uint_t umem_atomic_inc(uint_t *mem)
 {
 	register uint_t last;
 	do {
 		last = *mem;
-	} while (ec_atomic_cas(mem, last+1, last) != last);
+	} while (umem_atomic_cas(mem, last+1, last) != last);
 	return ++last;
 }
 # endif
-# ifndef ec_atomic_inc64
+# ifndef umem_atomic_inc64
 /* yeah, it's not great. It's only used to bump failed allocation
  * counts, so it is not critical right now. */
 extern pthread_mutex_t umem_ppc_64inc_lock;
-static INLINE uint64_t ec_atomic_inc64(uint64_t *val)
+static INLINE uint64_t umem_atomic_inc64(uint64_t *val)
 {
 	uint64_t rval;
 	pthread_mutex_lock(&umem_ppc_64inc_lock);
@@ -189,7 +189,7 @@ static INLINE uint64_t ec_atomic_inc64(uint64_t *val)
 	pthread_mutex_unlock(&umem_ppc_64inc_lock);
 	return rval;
 }
-# define ec_atomic_inc64 ec_atomic_inc64
+# define umem_atomic_inc64 umem_atomic_inc64
 # define NEED_64_LOCK 1
 # endif
@@ -207,8 +207,8 @@ static INLINE uint64_t ec_atomic_inc64(uint64_t *val)
 #define ISP2(x) (((x) & ((x) - 1)) == 0)
 /* beware! umem only uses these atomic adds for incrementing by 1 */
-#define atomic_add_64(lvalptr, delta) ec_atomic_inc64(lvalptr)
-#define atomic_add_32_nv(a, b) ec_atomic_inc(a)
+#define atomic_add_64(lvalptr, delta) umem_atomic_inc64(lvalptr)
+#define atomic_add_32_nv(a, b) umem_atomic_inc(a)
 #ifndef NANOSEC
 #define NANOSEC 1000000000
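
The rename is mechanical, and the retry pattern is unchanged: read the current value, try to install value+1 with a compare-and-swap, and loop if another thread got there first. For comparison, the same loop written against GCC's __sync builtin (illustrative only; note that umem_atomic_cas takes the new value before the comparand, whereas the builtin takes the comparand first):

static unsigned int cas_increment(unsigned int *mem)
{
	unsigned int last;

	do {
		last = *mem;
		/* __sync_val_compare_and_swap returns the prior value of *mem;
		 * the swap took effect only if that value equals `last` */
	} while (__sync_val_compare_and_swap(mem, last, last + 1) != last);
	return last + 1;
}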

umem.c:

@@ -3193,3 +3193,10 @@ fail:
 	(void) mutex_unlock(&umem_init_lock);
 	return (0);
 }
+
+void __attribute__((constructor))
+__umem_init (void)
+{
+	umem_startup(NULL, 0, 0, NULL, NULL);
+}
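
With the constructor in libumem proper, umem_startup runs as soon as the umem shared object is loaded (at process start, or at dlopen() time), not only when the malloc replacement library is linked in. A standalone sketch of the mechanism (hypothetical demo names, not from the commit):

#include <stdio.h>

/* GCC/Clang run constructor-attributed functions when the object that
 * contains them is loaded, before main() runs. */
static void __attribute__((constructor))
lib_init(void)
{
	fputs("library initialized\n", stderr);
}

int main(void)
{
	fputs("main\n", stderr);	/* printed after "library initialized" */
	return 0;
}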