Fixes a number of memory leaks, adds a pageCleanup() callback, and allows callers to use custom allocators with LSM trees

Sears Russell 2007-08-20 21:58:20 +00:00
parent 9c1c284406
commit 5bd2138a8b
19 changed files with 78 additions and 17 deletions
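As context for the new caller-facing hook, a rough usage sketch follows; it is not code from the commit. Only TlsmSetPageAllocator(), TlsmCreate(), TlsmAppendPage(), TregionAlloc(), and STORAGE_MANAGER_TALLOC appear in the patch below; regionAllocator(), REGION_PAGE_COUNT, and the idea of carving LSM pages out of one preallocated region are illustrative assumptions.

// Hedged sketch: route LSM tree page allocation through a preallocated region.
static pageid_t regionAllocator(int xid, void *conf) {
  pageid_t *nextPage = (pageid_t*)conf;
  return (*nextPage)++;  // hand out consecutive pages from the region
}

void useRegionAllocator(int xid, int comparatorId, int keySize) {
  static pageid_t nextPage;
  // REGION_PAGE_COUNT is hypothetical; STORAGE_MANAGER_TALLOC is borrowed
  // from alloc.c purely for illustration.
  nextPage = TregionAlloc(xid, REGION_PAGE_COUNT, STORAGE_MANAGER_TALLOC);
  TlsmSetPageAllocator(regionAllocator, &nextPage);
  recordid tree = TlsmCreate(xid, comparatorId, keySize);
  // ... TlsmAppendPage(xid, tree, key, pageid), etc.
}

Because pageAllocator and pageAllocatorConfig are file-level statics in lsmTree.c, the hook is global: it affects every tree created or grown after the call, not just one tree.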

View file

@ -4,7 +4,7 @@ LDADD=$(top_builddir)/src/stasis/libstasis.la \
if BUILD_BENCHMARKS
noinst_PROGRAMS=lhtableThreaded naiveHash logicalHash readLogicalHash naiveMultiThreaded logicalMultThreaded rawSet \
arrayListSet logicalMultiReaders linearHashNTA linkedListNTA pageOrientedListNTA \
linearHashNTAThreaded linearHashNTAMultiReader linearHashNTAWriteRequests transitiveClosure zeroCopy
linearHashNTAThreaded linearHashNTAMultiReader linearHashNTAWriteRequests transitiveClosure zeroCopy sequentialThroughput
endif
AM_CFLAGS=${GLOBAL_CFLAGS}

View file

@ -5,6 +5,9 @@ find . | grep \~$ | xargs rm -f
find . -name '*.bb' | xargs rm -f
find . -name '*.bbg' | xargs rm -f
find . -name '*.da' | xargs rm -f
find . -name '*.o' | xargs rm -f
find . -name '*.lo' | xargs rm -f
find . -name '*.Plo' | xargs rm -f
find . | perl -ne 'print if (/\/core(\.\d+)?$/)' | xargs rm -f
find . | perl -ne 'print if (/\/Makefile.in$/)' | xargs rm -f
find . | perl -ne 'print if (/\/storefile.txt$/)' | xargs rm -f

View file

@ -15,7 +15,8 @@ libstasis_la_SOURCES=crc32.c redblack.c lhtable.c doubleLinkedList.c common.c fl
operations/linearHashNTA.c operations/linkedListNTA.c \
operations/pageOrientedListNTA.c operations/bTree.c \
operations/regions.c operations/lsmTree.c \
io/rangeTracker.c io/memory.c io/file.c io/pfile.c io/non_blocking.c io/debug.c \
io/rangeTracker.c io/memory.c io/file.c io/pfile.c io/non_blocking.c \
io/debug.c \
bufferManager/pageArray.c bufferManager/bufferHash.c \
replacementPolicy/lru.c replacementPolicy/lruFast.c
AM_CFLAGS=${GLOBAL_CFLAGS}

View file

@ -231,10 +231,12 @@ void allocationPolicyDeinit(allocationPolicy * ap) {
RB_ENTRY(delete)(next, ap->availablePages);
free((void*)next);
}
LH_ENTRY(destroy)(ap->xidAlloced);
LH_ENTRY(destroy)(ap->xidDealloced);
RB_ENTRY(destroy)(ap->availablePages);
LH_ENTRY(destroy)(ap->pageOwners);
LH_ENTRY(destroy)(ap->allPages);
free(ap);
}

View file

@ -63,12 +63,13 @@ void writeBlob(int xid, Page * p2, lsn_t lsn, recordid rid, const byte * buf) {
static int notSupported(int xid, Page * p) { return 0; }
void blobLoaded(Page *p) {
static void blobLoaded(Page *p) {
p->LSN = *lsn_ptr(p);
}
void blobFlushed(Page *p) {
static void blobFlushed(Page *p) {
*lsn_ptr(p) = p->LSN;
}
static void blobCleanup(Page *p) { }
static page_impl pi = {
BLOB_PAGE,
@ -93,6 +94,7 @@ static page_impl pi = {
0, //XXX page_impl_dereference_identity,
blobLoaded,
blobFlushed,
blobCleanup
};
page_impl blobImpl() {
return pi;

View file

@ -109,6 +109,7 @@ inline static Page * writeBackOnePage() {
// printf("Write(%ld)\n", (long)victim->id);
pageWrite(victim);
pageCleanup(victim);
// Make sure that no one mistakenly thinks this is still a live copy.
victim->id = -1;
@ -277,6 +278,7 @@ static void bhBufDeinit() {
LH_ENTRY(openlist)(cachedPages, &iter);
while((next = LH_ENTRY(readlist)(&iter))) {
pageWrite((next->value));
pageCleanup((next->value)); // normally called by writeBackOnePage()
}
LH_ENTRY(closelist)(&iter);
LH_ENTRY(destroy)(cachedPages);
@ -299,6 +301,7 @@ static void bhSimulateBufferManagerCrash() {
Page * p = next->value;
writelock(p->rwlatch,0);
pageFlushed(p); // normally, pageWrite() would call this...
pageCleanup(p); // normally called by writeBackOnePage()
unlock(p->rwlatch);
}
LH_ENTRY(closelist)(&iter);
@ -331,7 +334,7 @@ void bhBufInit() {
cachedPages = LH_ENTRY(create)(MAX_BUFFER_SIZE);
freeListLength = MAX_BUFFER_SIZE / 10;
freeListLength = 9 * MAX_BUFFER_SIZE / 10;
freeLowWater = freeListLength - 5;
freeCount = 0;
pageCount = 0;

View file

@ -160,6 +160,9 @@ void TallocInit() {
allocPolicy = allocationPolicyInit();
// pthread_mutex_init(&talloc_mutex, NULL);
}
void TallocDeinit() {
allocationPolicyDeinit(allocPolicy);
}
static void reserveNewRegion(int xid) {
int firstPage = TregionAlloc(xid, TALLOC_REGION_SIZE, STORAGE_MANAGER_TALLOC);

View file

@ -76,6 +76,19 @@ void lsmTreeRegisterComparator(int id, lsm_comparator_t i) {
*/
static pageid_t defaultAllocator(int xid, void *ignored) {
return TpageAlloc(xid);
}
static pageid_t (*pageAllocator)(int xid, void *ignored) = defaultAllocator;
static void *pageAllocatorConfig;
void TlsmSetPageAllocator(pageid_t (*allocer)(int xid, void * ignored),
void * config) {
pageAllocator = allocer;
pageAllocatorConfig = config;
}
typedef struct lsmTreeState {
pageid_t lastLeaf;
@ -119,7 +132,7 @@ static void initializeNodePage(int xid, Page *p, size_t keylen) {
*/
static inline size_t getKeySizeFixed(int xid, Page const *p) {
return *recordsize_ptr(p) - sizeof(lsmTreeNodeRecord);
return (*recordsize_ptr(p)) - sizeof(lsmTreeNodeRecord);
}
static inline size_t getKeySizeVirtualMethods(int xid, Page *p) {
@ -190,7 +203,7 @@ recordid TlsmCreate(int xid, int comparator, int keySize) {
assert(HEADER_SIZE + 2 * (sizeof(lsmTreeNodeRecord) +keySize) <
USABLE_SIZE_OF_PAGE - 2 * sizeof(short));
pageid_t root = TpageAlloc(xid);
pageid_t root = pageAllocator(xid, pageAllocatorConfig);
DEBUG("Root = %lld\n", root);
recordid ret = { root, 0, 0 };
@ -237,7 +250,7 @@ static recordid buildPathToLeaf(int xid, recordid root, Page *root_p,
assert(depth);
DEBUG("buildPathToLeaf(depth=%d) (lastleaf=%lld) called\n",depth, lastLeaf);
pageid_t child = TpageAlloc(xid); // XXX Use some other function...
pageid_t child = pageAllocator(xid, pageAllocatorConfig); // XXX Use some other function...
DEBUG("new child = %lld internal? %d\n", child, depth-1);
Page *child_p = loadPage(xid, child);
@ -316,6 +329,8 @@ static recordid appendInternalNode(int xid, Page *p,
int depth,
const byte *key, size_t key_len,
pageid_t val_page, pageid_t lastLeaf) {
assert(*page_type_ptr(p) == LSM_ROOT_PAGE ||
*page_type_ptr(p) == FIXED_PAGE);
if(!depth) {
// leaf node.
recordid ret = recordPreAlloc(xid, p, sizeof(lsmTreeNodeRecord)+key_len);
@ -449,7 +464,7 @@ recordid TlsmAppendPage(int xid, recordid tree,
if(ret.size == INVALID_SLOT) {
DEBUG("Need to split root; depth = %d\n", depth);
pageid_t child = TpageAlloc(xid);
pageid_t child = pageAllocator(xid, pageAllocatorConfig);
Page *lc = loadPage(xid, child);
writelock(lc->rwlatch,0);
@ -629,15 +644,17 @@ pageid_t TlsmFindPage(int xid, recordid tree, const byte *key) {
associated with the tree.
*/
static void lsmPageLoaded(Page *p) {
/// XXX should call fixedLoaded, or something...
lsmTreeState *state = malloc(sizeof(lsmTreeState));
state->lastLeaf = -1;
p->impl = state;
}
static void lsmPageFlushed(Page *p) { }
/**
Free any soft state associated with the tree rooted at page p.
This is called by the buffer manager.
*/
static void lsmPageFlushed(Page *p) {
static void lsmPageCleanup(Page *p) {
lsmTreeState *state = p->impl;
free(state);
}
@ -648,6 +665,7 @@ page_impl lsmRootImpl() {
page_impl pi = fixedImpl();
pi.pageLoaded = lsmPageLoaded;
pi.pageFlushed = lsmPageFlushed;
pi.pageCleanup = lsmPageCleanup;
pi.page_type = LSM_ROOT_PAGE;
return pi;
}
@ -706,7 +724,7 @@ int lsmTreeIterator_next(int xid, lladdIterator_t *it) {
impl->current = fixedNext(xid, impl->p, impl->current);
if(impl->current.size == INVALID_SLOT) {
const lsmTreeNodeRecord *next_rec = readNodeRecord(xid,impl->p,NEXT_LEAF,
impl->current.size);
keySize);
unlock(impl->p->rwlatch);
releasePage(impl->p);
@ -722,6 +740,9 @@ int lsmTreeIterator_next(int xid, lladdIterator_t *it) {
impl->p = 0;
impl->current.size = -1;
}
} else {
assert(impl->current.size == keySize + sizeof(lsmTreeNodeRecord));
impl->current.size = keySize;
}
if(impl->current.size != INVALID_SLOT) {
impl->t = readNodeRecord(xid,impl->p,impl->current.slot,impl->current.size);

View file

@ -63,6 +63,9 @@ pblHashTable_t * nestedTopActions = NULL;
void initNestedTopActions() {
nestedTopActions = pblHtCreate();
}
void deinitNestedTopActions() {
pblHtDelete(nestedTopActions);
}
/** @todo TbeginNestedTopAction's API might not be quite right.
Are there cases where we need to pass a recordid in?

View file

@ -322,6 +322,13 @@ void pageFlushed(Page * p){
*lsn_ptr(p) = p->LSN;
}
}
void pageCleanup(Page * p) {
short type = *page_type_ptr(p);
if(type) {
assert(page_impls[type].page_type == type);
page_impls[type].pageCleanup(p);
}
}
/// Generic block implementations

View file

@ -272,7 +272,7 @@ int pageFreespace(int xid, Page * p);
void pageCompact(Page * p);
void pageLoaded(Page * p);
void pageFlushed(Page * p);
void pageCleanup(Page * p);
/**
@return -1 if the field does not exist, the size of the field otherwise (the rid parameter's size field will be ignored).
*/
@ -581,6 +581,7 @@ typedef struct page_impl {
This function should record p->LSN somewhere appropriate
*/
void (*pageFlushed)(Page * p);
void (*pageCleanup)(Page * p);
} page_impl;
/**

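The division of labor between pageFlushed() and the new pageCleanup() is the heart of the leak fix: a cached page may be flushed many times, but its soft state (p->impl) must be freed exactly once, when the buffer manager evicts or tears the page down (see the pageCleanup() calls in the buffer hash diff above). A minimal conforming implementation might look like the sketch below; it is not from the patch. myState, myImpl(), and MY_PAGE_TYPE are hypothetical, while fixedImpl(), lsn_ptr(), and the page_impl fields are real.

typedef struct myState { pageid_t cachedHint; } myState;

static void myLoaded(Page *p) {
  p->LSN = *lsn_ptr(p);                 // same as fixedLoaded()/blobLoaded()
  p->impl = calloc(1, sizeof(myState)); // per-page soft state
}
static void myFlushed(Page *p) {
  *lsn_ptr(p) = p->LSN;                 // may run many times while cached
}
static void myCleanup(Page *p) {
  free(p->impl);                        // runs once, at eviction / shutdown
  p->impl = 0;
}

page_impl myImpl() {
  page_impl pi = fixedImpl();           // start from an existing implementation
  pi.page_type   = MY_PAGE_TYPE;        // hypothetical constant; cf. LSM_ROOT_PAGE
  pi.pageLoaded  = myLoaded;
  pi.pageFlushed = myFlushed;
  pi.pageCleanup = myCleanup;           // the field added by this commit
  return pi;
}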
View file

@ -79,6 +79,7 @@ static int fixedGetLength(int xid, Page *p, recordid rid) {
return rid.slot > *recordcount_ptr(p) ?
INVALID_SLOT : physical_slot_length(*recordsize_ptr(p));
}
static int notSupported(int xid, Page * p) { return 0; }
static int fixedFreespace(int xid, Page * p) {
@ -129,6 +130,7 @@ void fixedLoaded(Page *p) {
void fixedFlushed(Page *p) {
*lsn_ptr(p) = p->LSN;
}
void fixedCleanup(Page *p) { }
page_impl fixedImpl() {
static page_impl pi = {
FIXED_PAGE,
@ -153,6 +155,7 @@ page_impl fixedImpl() {
0, // XXX dereference
fixedLoaded, // loaded
fixedFlushed, // flushed
fixedCleanup
};
return pi;
}

View file

@ -2,7 +2,7 @@
#ifndef __FIXED_H
#define __FIXED_H
// @todo rename fixed.h macros to something more specific
#define recordsize_ptr(page) shorts_from_end((page), 1)
#define recordcount_ptr(page) shorts_from_end((page), 2)
#define fixed_record_ptr(page, n) bytes_from_start((page), *recordsize_ptr((page)) * (n))

View file

@ -242,7 +242,7 @@ void indirectLoaded(Page *p) {
void indirectFlushed(Page *p) {
*lsn_ptr(p) = p->LSN;
}
void indirectCleanup(Page *p) { }
static page_impl pi = {
INDIRECT_PAGE,
0, //read,
@ -266,6 +266,7 @@ static page_impl pi = {
0, //XXX page_impl_dereference_identity,
indirectLoaded,
indirectFlushed,
indirectCleanup
};
/**

View file

@ -507,6 +507,7 @@ void slottedFlushed(Page *p) {
*lsn_ptr(p) = p->LSN;
slottedFsck(p);
}
void slottedCleanup(Page *p) { }
page_impl slottedImpl() {
static page_impl pi = {
@ -532,6 +533,7 @@ static page_impl pi = {
0, //XXX page_impl_dereference_identity,
slottedLoaded,
slottedFlushed,
slottedCleanup
};
return pi;
}

View file

@ -408,6 +408,8 @@ int Tdeinit() {
assert( numActiveXactions == 0 );
truncationDeinit();
ThashDeinit();
TallocDeinit();
deinitNestedTopActions();
bufDeinit();
DEBUG("Closing page file tdeinit\n");
closePageFile();

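With TallocDeinit() and deinitNestedTopActions() wired into Tdeinit(), a client that pairs startup and shutdown now releases the allocation policy and the nested top action table instead of leaking them. A hedged sketch of that pairing; Tinit(), Tbegin(), and Tcommit() are assumed to have their usual Stasis signatures and are not part of this diff:

Tinit();
int xid = Tbegin();
// ... Talloc / Tset / LSM tree operations ...
Tcommit(xid);
Tdeinit();  // now also runs TallocDeinit() and deinitNestedTopActions()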
View file

@ -21,7 +21,7 @@ void allocTransactionAbort(int xid);
void allocTransactionCommit(int xid);
void TallocInit();
void TallocDeinit();
/**
Allocate a record.

View file

@ -45,6 +45,12 @@ recordid TlsmDealloc(int xid, recordid tree);
recordid TlsmAppendPage(int xid, recordid tree,
const byte *key,
long pageid);
/**
Override the page allocation algorithm that LSM tree uses by default
*/
void TlsmSetPageAllocator(pageid_t (*allocer)(int xid, void * ignored),
void * config);
/**
Lookup a leaf page.
@ -92,7 +98,7 @@ static inline int lsmTreeIterator_key (int xid, lladdIterator_t *it,
byte **key) {
lsmIteratorImpl * impl = (lsmIteratorImpl*)it->impl;
*key = (byte*)(impl->t+1);
return sizeof(impl->current.size);
return impl->current.size;
}
static inline int lsmTreeIterator_value(int xid, lladdIterator_t *it,

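The second hunk above changes lsmTreeIterator_key() to return the caller-visible key length instead of sizeof(impl->current.size); together with the lsmTreeIterator_next() fix in lsmTree.c, iteration now reports real key sizes. A hedged usage sketch; the iterator it is assumed to come from the tree's iterator constructor (not shown in this diff), and _next() is assumed to return nonzero while records remain:

byte *key;
while(lsmTreeIterator_next(xid, it)) {
  int keyLen = lsmTreeIterator_key(xid, it, &key);  // actual key size, post-fix
  // key points at keyLen bytes of the current record's key
}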
View file

@ -57,6 +57,7 @@ terms specified in this license.
#define __NESTED_TOP_ACTIONS_H__
void initNestedTopActions();
void deinitNestedTopActions();
void * TbeginNestedTopAction(int xid, int op, const byte* log_arguments, int log_arguments_length);
lsn_t TendNestedTopAction(int xid, void * handle);
#endif