test + fix allocation of zero byte records

commit 0058042987
parent cb97772aa6
Author: Sears Russell
Date:   2011-11-07 22:53:55 +00:00
3 changed files with 30 additions and 7 deletions

View file

@@ -305,11 +305,13 @@ recordid Talloc(int xid, unsigned long size) {
   Page * p = loadPage(xid, alloc->lastFreepage);
   writelock(p->rwlatch, 0);
-  while(stasis_record_freespace(xid, p) < stasis_record_type_to_size(type)) {
+  int rec_size = stasis_record_type_to_size(type);
+  if(rec_size < 4) { rec_size = 4; }
+  while(stasis_record_freespace(xid, p) < rec_size) {
     stasis_record_compact(p);
     int newFreespace = stasis_record_freespace(xid, p);
-    if(newFreespace >= stasis_record_type_to_size(type)) {
+    if(newFreespace >= rec_size) {
       break;
     }
@@ -318,12 +320,12 @@ recordid Talloc(int xid, unsigned long size) {
   releasePage(p);
   pageid = stasis_allocation_policy_pick_suitable_page(alloc->allocPolicy, xid,
-                                                       stasis_record_type_to_size(type));
+                                                       rec_size);
   if(pageid == INVALID_PAGE) {
     stasis_alloc_reserve_new_region(alloc, xid);
     pageid = stasis_allocation_policy_pick_suitable_page(alloc->allocPolicy, xid,
-                                                         stasis_record_type_to_size(type));
+                                                         rec_size);
   }
   alloc->lastFreepage = pageid;
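
In short, Talloc() now computes the record's on-page size once, clamps it to a floor of 4 bytes, and reuses the clamped value both for the in-page free-space check and for the allocation-policy queries, so a zero-byte record still reserves enough room for its per-record bookkeeping. A minimal sketch of the clamp follows; the helper name effective_record_size is hypothetical (the commit does this inline in Talloc()), and the reason given for the 4-byte floor is my reading of the change, not stated in the commit.

int effective_record_size(int type_size) {
  /* type_size would come from stasis_record_type_to_size(type). */
  int rec_size = type_size;
  if(rec_size < 4) { rec_size = 4; }   /* zero/tiny records still need room on the page */
  return rec_size;
}

/* Usage, mirroring the hunk above (sketch only):
 *   int rec_size = effective_record_size(stasis_record_type_to_size(type));
 *   while(stasis_record_freespace(xid, p) < rec_size) { ... }
 */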

View file

@@ -229,8 +229,8 @@ static void slottedCompactSlotIDs(int xid, Page * p) {
    makes it possible for callers to guarantee the safety
    of a subsequent call to really_do_ralloc().
 */
-static size_t slottedFreespaceForSlot(Page * page, int slot) {
-  size_t slotOverhead;
+static ssize_t slottedFreespaceForSlot(Page * page, int slot) {
+  ssize_t slotOverhead;
   if(slot == INVALID_SLOT) {
     slotOverhead = (*stasis_page_slotted_freelist_ptr(page) == INVALID_SLOT) ? SLOTTED_PAGE_OVERHEAD_PER_RECORD : 0;
@@ -254,7 +254,7 @@ static size_t slottedFreespaceForSlot(Page * page, int slot) {
     return 0;
   } else {
     // The regions would not overlap. There might be free space.
-    return (size_t) (end_of_free_space - start_of_free_space - slotOverhead);
+    return (ssize_t) (end_of_free_space - start_of_free_space - slotOverhead);
   }
 }
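
The slotted-page change widens slottedFreespaceForSlot() from size_t to ssize_t. The likely motivation (my interpretation, not spelled out in the commit): the returned expression subtracts the slot overhead from the gap between the slot array and the record data, and once very small records are in play that difference can go negative; with an unsigned size_t it would wrap to a huge positive value and make a full page look nearly empty. The toy program below only demonstrates the wrap-around; it is not code from the commit.

#include <stdio.h>
#include <sys/types.h>   /* ssize_t */

int main(void) {
  size_t end_of_free_space = 100, start_of_free_space = 96, slotOverhead = 8;

  /* Unsigned arithmetic underflows and wraps to a value near SIZE_MAX. */
  size_t  wrapped = end_of_free_space - start_of_free_space - slotOverhead;

  /* Signed arithmetic reports the true deficit: -4. */
  ssize_t signed_result = (ssize_t)(end_of_free_space - start_of_free_space) - (ssize_t)slotOverhead;

  printf("unsigned result: %zu\n", wrapped);
  printf("signed result:   %zd\n", signed_result);
  return 0;
}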

View file

@@ -479,6 +479,26 @@ START_TEST(operation_alloc_test) {
 } END_TEST
+
+START_TEST(operation_alloc_small) {
+  Tinit();
+  int xid = Tbegin();
+  recordid lastrid = NULLRID;
+  for(int j = 0; j < 10; j++) {
+    for(int i = 0; i * (j+4) < PAGE_SIZE * 3; i++) {
+      recordid rid = Talloc(xid, j);
+      assert(rid.page != lastrid.page || rid.slot != lastrid.slot);
+      DEBUG("%d ", rid.page);
+      lastrid = rid;
+    }
+    DEBUG("\n");
+  }
+  Tcommit(xid);
+  Tdeinit();
+} END_TEST;
+
 #define ARRAY_LIST_CHECK_ITER 10000
 START_TEST(operation_array_list) {
@@ -835,6 +855,7 @@ Suite * check_suite(void) {
     tcase_add_test(tc, operation_prepare);
   }
   tcase_add_test(tc, operation_alloc_test);
+  tcase_add_test(tc, operation_alloc_small);
   tcase_add_test(tc, operation_array_list);
   tcase_add_test(tc, operation_lsn_free);
   tcase_add_test(tc, operation_reorderable);
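
The new operation_alloc_small test allocates a few pages' worth of records of sizes 0 through 9 (so it exercises zero-byte allocation directly) and asserts that no two consecutive Talloc() calls hand back the same record id. For context, here is a generic sketch of how a Check suite such as check_suite() is typically driven; the main() below is an assumption about the test harness, not part of this commit.

#include <check.h>
#include <stdlib.h>

Suite * check_suite(void);   /* the suite built in the file above */

int main(void) {
  SRunner * sr = srunner_create(check_suite());
  srunner_run_all(sr, CK_NORMAL);       /* runs operation_alloc_small along with the rest */
  int failed = srunner_ntests_failed(sr);
  srunner_free(sr);
  return failed ? EXIT_FAILURE : EXIT_SUCCESS;
}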