The freespace calculations were double counting the new slot's header.
This commit is contained in:
parent
ced022d0fe
commit
3c55daede9
1 changed files with 19 additions and 12 deletions
|
@ -138,20 +138,25 @@ void slottedPageInitialize(Page * page) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** @todo Implement a model of slotted pages in the test scripts, and
    then write a randomized test that confirms the model matches the
    implementation's behavior. */
/**
 * Return the number of bytes of contiguous free space on this page,
 * i.e. the gap between the record area (which grows up from the start
 * of the page) and the slot-header array (which grows down from the
 * end of the page).
 *
 * Callers must hold the page lock (hence "Unlocked" = no internal
 * locking).  The returned count ignores space that could be reclaimed
 * by compaction.
 *
 * NOTE(review): this intentionally does NOT subtract
 * SLOTTED_PAGE_OVERHEAD_PER_RECORD — the previous version did, which
 * double-counted the new slot's header (the header for slot
 * *numslots_ptr(page) lives inside the region bounded below).
 *
 * @param page  the slotted page to inspect; must be initialized via
 *              slottedPageInitialize().
 * @return      usable free bytes between the two regions, or 0 if they
 *              touch or overlap.
 */
size_t slottedFreespaceUnlocked(Page * page) {
  /* @todo I'm unsure these variable names are actually accurate... */

  // end_of_free_space points to the beginning of the next slot header
  // (the one we would write if we allocated another slot).  Slot
  // headers are indexed from the end of the page, so header index
  // numslots-1 is the lowest-addressed existing header — presumably
  // the byte just past the usable gap.  TODO(review): confirm header
  // layout against slot_length_ptr().
  byte* end_of_free_space = (byte*)slot_length_ptr(page, *numslots_ptr(page)-1);

  // start_of_free_space points to the first unallocated byte in the page
  // (ignoring space that could be reclaimed by compaction).
  byte* start_of_free_space = (byte*)(page->memAddr + *freespace_ptr(page));

  // We need the "+ SLOTTED_PAGE_OVERHEAD_PER_RECORD" because the two
  // regions may legitimately overlap by up to one slot header's worth
  // of bytes; anything beyond that indicates page corruption.
  assert(end_of_free_space + SLOTTED_PAGE_OVERHEAD_PER_RECORD >= start_of_free_space);

  if(end_of_free_space < start_of_free_space) {
    // The regions overlap; there is no free space.
    return 0;
  } else {
    // The regions do not overlap.  There might be free space.
    return (size_t) (end_of_free_space - start_of_free_space);
  }
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -302,6 +307,8 @@ static void __really_do_ralloc(Page * page, recordid rid) {
|
||||||
slottedCompact(page);
|
slottedCompact(page);
|
||||||
|
|
||||||
/* Make sure there's enough free space... */
|
/* Make sure there's enough free space... */
|
||||||
|
// DELETE NEXT LINE
|
||||||
|
int size = slottedFreespaceUnlocked(page);
|
||||||
assert (slottedFreespaceUnlocked(page) >= rid.size);
|
assert (slottedFreespaceUnlocked(page) >= rid.size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue