/** $Id$ */

#include "../page.h"
#include "slotted.h"

#include <assert.h>
static void really_do_ralloc(Page * page, recordid rid);
size_t slottedFreespaceForSlot(Page * page, int slot);
void fsckSlottedPage(const Page * page) {
  assertlocked(page->rwlatch);
#ifdef SLOTTED_PAGE_SANITY_CHECKS
  Page dummy;

  dummy.id = -1;
  dummy.memAddr = 0;

  const short page_type = *page_type_ptr(page);
  const short numslots = *numslots_ptr(page);
  const short freespace = *freespace_ptr(page);
  const short freelist = *freelist_ptr(page);

  const long slotListStart = (long)slot_length_ptr(&dummy, numslots-1);
  assert(slotListStart < PAGE_SIZE && slotListStart >= 0);
  assert(page_type == SLOTTED_PAGE ||
         page_type == BOUNDARY_TAG_PAGE);
  assert(numslots >= 0);
  assert(numslots * SLOTTED_PAGE_OVERHEAD_PER_RECORD < PAGE_SIZE);
  assert(freespace >= 0);
  assert(freespace <= slotListStart);
  assert(freelist >= INVALID_SLOT);
  assert(freelist < numslots);

  // Check the integrity of the freelist.  All free slots less than
  // numslots should be on it, in order.

  short * slot_offsets = alloca(numslots * sizeof(short));
  short * slot_lengths = alloca(numslots * sizeof(short));
  for(int i = 0; i < numslots; i++) {
    slot_offsets[i] = *slot_ptr(page, i);
    slot_lengths[i] = *slot_length_ptr(page, i);
  }

  short foundEndOfList = 0;

  if(freelist != INVALID_SLOT) {
    assert(slot_offsets[freelist] == INVALID_SLOT);
  } else {
    foundEndOfList = 1;
  }

  for(short i = 0; i < numslots; i++) {
    const short slot_length = slot_lengths[i];
    const short slot_offset = slot_offsets[i];
    if(slot_offset == INVALID_SLOT) {
      if(slot_length == INVALID_SLOT) {
        assert(!foundEndOfList);
        foundEndOfList = 1;
      } else {
        assert(slot_offsets[slot_length] == INVALID_SLOT);
      }
    } else {
      assert(slot_offset + slot_length <= freespace);
    }
  }

  // Is the free list terminated?
  assert(foundEndOfList);

#ifdef SLOTTED_PAGE_CHECK_FOR_OVERLAP

  const byte UNUSED = 0xFF;
  const byte PAGE_HEADER = 0xFE;
  const byte SLOTTED_HEADER = 0xFD;
  //  const byte SLOT_LIST = 0xFC;
  const byte FREE_SPACE = 0xFB;

  const unsigned short S_SLOT_LIST = 0xFCFC;

  byte image[PAGE_SIZE];
  for(short i = 0; i < PAGE_SIZE; i++) {
    image[i] = UNUSED;
  }
  for(short i = USABLE_SIZE_OF_PAGE; i < PAGE_SIZE; i++) {
    image[i] = PAGE_HEADER;
  }
  for(short i = USABLE_SIZE_OF_PAGE - SLOTTED_PAGE_HEADER_OVERHEAD; i < USABLE_SIZE_OF_PAGE; i++) {
    image[i] = SLOTTED_HEADER;
  }
  for(short i = *freespace_ptr(page); i < slotListStart; i++) {
    image[i] = FREE_SPACE;
  }

  dummy.memAddr = image;

  for(short i = 0; i < *numslots_ptr(page); i++) {
    *slot_ptr(&dummy, i) = S_SLOT_LIST;
    *slot_length_ptr(&dummy, i) = S_SLOT_LIST;
  }
  for(short i = 0; i < *numslots_ptr(page); i++) {
    short slot_offset = *slot_ptr(page, i);
    if(slot_offset != INVALID_SLOT) {
      const unsigned char ci = i % 0xFF;
      short slot_len = physical_slot_length(*slot_length_ptr(page, i));

      for(short j = 0; j < slot_len; j++) {
        assert(image[slot_offset + j] == UNUSED);
        image[slot_offset + j] = ci;
      }
    }
  }
#endif // SLOTTED_PAGE_CHECK_FOR_OVERLAP
#endif // SLOTTED_PAGE_SANITY_CHECKS
}
#ifndef SLOTTED_PAGE_SANITY_CHECKS
#define fsckSlottedPage(x) ((void)0)
#endif
/**
   Move all of the records to the beginning of the page in order to
   increase the available free space.

   The caller of this function must have a writelock on the page.
*/
void slottedCompact(Page * page) {
  assertlocked(page->rwlatch);
  Page bufPage;
  byte buffer[PAGE_SIZE];
  bufPage.memAddr = buffer;

  // Copy external headers into bufPage.

  memcpy(&buffer[USABLE_SIZE_OF_PAGE], &(page->memAddr[USABLE_SIZE_OF_PAGE]), PAGE_SIZE - USABLE_SIZE_OF_PAGE);

  // Now, build a new slotted page in the bufPage struct.

  *freespace_ptr(&bufPage) = 0;
  // numslots_ptr will be set later.
  *freelist_ptr(&bufPage) = INVALID_SLOT;

  const short numSlots = *numslots_ptr(page);
  short lastFreeSlot = INVALID_SLOT;
  short lastFreeSlotBeforeUsedSlot = INVALID_SLOT;
  short lastUsedSlot = -1;

  // Rebuild the free list.

  for(short i = 0; i < numSlots; i++) {
    if(*slot_ptr(page, i) == INVALID_SLOT) {
      if(lastFreeSlot == INVALID_SLOT) {
        *freelist_ptr(&bufPage) = i;
      } else {
        *slot_length_ptr(&bufPage, lastFreeSlot) = i;
      }
      *slot_ptr(&bufPage, i) = INVALID_SLOT;
      lastFreeSlot = i;
    } else {
      lastUsedSlot = i;
      lastFreeSlotBeforeUsedSlot = lastFreeSlot;

      short logicalSize = *slot_length_ptr(page, i);
      short physicalSize = physical_slot_length(logicalSize);

      memcpy(&(buffer[*freespace_ptr(&bufPage)]), record_ptr(page, i), physicalSize);

      *slot_ptr(&bufPage, i) = *freespace_ptr(&bufPage);
      *slot_length_ptr(&bufPage, i) = logicalSize;

      (*freespace_ptr(&bufPage)) += physicalSize;
    }
  }

  // Truncate the free list, and update numslots_ptr.
  *slot_length_ptr(&bufPage, lastFreeSlotBeforeUsedSlot) = INVALID_SLOT;
  *numslots_ptr(&bufPage) = lastUsedSlot+1;

  memcpy(page->memAddr, buffer, PAGE_SIZE);

  fsckSlottedPage(page);
}
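/* Illustrative layout sketch (values invented for exposition, not taken from
   the original source): compacting a page where record 1 was freed.

     before:  [ rec0 | hole from rec1 | rec2 | unused ...... | slot headers ]
     after:   [ rec0 | rec2 | unused ....................... | slot headers ]

   Slot numbers never change -- only the offsets stored in the slot headers
   move -- so recordids held by callers remain valid across compaction. */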
void slottedPageInit() {
#ifdef SLOTTED_PAGE_CHECK_FOR_OVERLAP
  printf("slotted.c: Using expensive page sanity checking.\n");
#endif
}

void slottedPageDeInit() {
}
void slottedPageInitialize(Page * page) {
  *page_type_ptr(page) = SLOTTED_PAGE;
  *freespace_ptr(page) = 0;
  *numslots_ptr(page) = 0;
  *freelist_ptr(page) = INVALID_SLOT;
}

size_t slottedFreespaceUnlocked(Page * page);
/**
   This is needed to correctly implement really_do_ralloc(), since
   it takes the position of the new slot's header into account.
*/
size_t slottedFreespaceForSlot(Page * page, int slot) {
  assertlocked(page->rwlatch);
  size_t slotOverhead;

  if(slot == INVALID_SLOT) {
    slotOverhead = (*freelist_ptr(page) == INVALID_SLOT) ? SLOTTED_PAGE_OVERHEAD_PER_RECORD : 0;
  } else if(slot < *numslots_ptr(page)) {
    slotOverhead = 0;
  } else {
    //    slotOverhead = SLOTTED_PAGE_OVERHEAD_PER_RECORD * (*numslots_ptr(page) - slot);
    slotOverhead = SLOTTED_PAGE_OVERHEAD_PER_RECORD * ((slot+1) - *numslots_ptr(page));
  }
  // end_of_free_space points to the beginning of the lowest slot header,
  // at the bottom of the slot list.
  byte* end_of_free_space = (byte*)slot_length_ptr(page, (*numslots_ptr(page))-1);

  // start_of_free_space points to the first unallocated byte in the page
  // (ignoring space that could be reclaimed by compaction).
  byte* start_of_free_space = (byte*)(page->memAddr + *freespace_ptr(page));

  assert(end_of_free_space >= start_of_free_space);

  if(end_of_free_space < start_of_free_space + slotOverhead) {
    // The regions would overlap after allocation.  There is no free space.
    return 0;
  } else {
    // The regions would not overlap.  There might be free space.
    return (size_t) (end_of_free_space - start_of_free_space - slotOverhead);
  }
}
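/* Worked example (numbers assumed for illustration only): suppose the page
   holds three slots, *freespace_ptr(page) == 100, and the lowest slot header
   begins at offset 4000.  For an existing slot, slotOverhead == 0, so the
   function returns 4000 - 100 = 3900 bytes.  For brand-new slot 3, one more
   slot header must also fit, so the answer shrinks by
   SLOTTED_PAGE_OVERHEAD_PER_RECORD bytes. */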
/** @todo Implement a model of slotted pages in the test scripts, and
    then write a randomized test that confirms the model matches the
    implementation's behavior. */
size_t slottedFreespaceUnlocked(Page * page) {
  return slottedFreespaceForSlot(page, INVALID_SLOT);
}
size_t slottedFreespace(Page * page) {
  size_t ret;
  readlock(page->rwlatch, 292);
  ret = slottedFreespaceUnlocked(page);
  readunlock(page->rwlatch);
  return ret;
}
recordid slottedRawRalloc(Page * page, int size) {
  int type = size;
  size = physical_slot_length(type);
  assert(type != INVALID_SLOT);

  writelock(page->rwlatch, 342);
  fsckSlottedPage(page);

  recordid rid;

  rid.page = page->id;
  rid.slot = *numslots_ptr(page);
  rid.size = type;  // The rid should reflect the fact that this is a special slot.

  /* The freelist_ptr points to the first free slot number, which
     is the head of a linked list of free slot numbers. */
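  /* Illustrative encoding (slot numbers invented for exposition): if slots 3
     and 7 are free, the page stores *freelist_ptr(page) == 3,
     *slot_ptr(page, 3) == INVALID_SLOT, *slot_length_ptr(page, 3) == 7,
     *slot_ptr(page, 7) == INVALID_SLOT, and
     *slot_length_ptr(page, 7) == INVALID_SLOT to terminate the list. */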
  if(*freelist_ptr(page) != INVALID_SLOT) {
    rid.slot = *freelist_ptr(page);
    // really_do_ralloc will look this slot up in the freelist (which
    // is O(1), since rid.slot is the head), and then remove it from
    // the list.
  }

  really_do_ralloc(page, rid);

  assert(*numslots_ptr(page) > rid.slot);
  assert(type == *slot_length_ptr(page, rid.slot));
  assert(size == physical_slot_length(*slot_length_ptr(page, rid.slot)));

  fsckSlottedPage(page);

  writeunlock(page->rwlatch);

  return rid;
}
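/* A minimal usage sketch, assuming a pinned page that has been passed through
   slottedPageInitialize(); the function name and payload are hypothetical, and
   the block is compiled out since it exists only to show how the calls in
   this file fit together (each call acquires the page latch internally). */
#if 0
static void exampleRoundTrip(Page * p) {
  int payload = 42;
  recordid rid = slottedRawRalloc(p, sizeof(payload));
  slottedWrite(p, rid, (const byte*)&payload);
  int copy = 0;
  slottedRead(p, rid, (byte*)&copy);
  assert(copy == payload);
}
#endif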
/**
   Allocation is scary without locking.  Consider this situation:

    (1) *numslots_ptr(page) is 10
    (2) An aborting transaction calls really_do_ralloc(page) with rid.slot = 12
    (3) *numslots_ptr(page) must be incremented to cover slot 12.  Now, what happens to slots 10 and 11?
        - If 11 was also deleted by a transaction that could abort, we should lock it so that it won't be reused.
    (4) This function adds it to the freelist to avoid leaking space.  (Therefore, Talloc() can return recordids that will
        be reused by aborting transactions...)

   For now, we make sure that we don't alloc off a page that another active
   transaction dealloced from.

   @param rid Recordid with 'internal' size.  The size should have already been translated to a type if necessary.
*/
static void really_do_ralloc(Page * page, recordid rid) {
  assertlocked(page->rwlatch);

  short freeSpace;

  // Compact the page if we don't have enough room.
  if(slottedFreespaceForSlot(page, rid.slot) < physical_slot_length(rid.size)) {
    slottedCompact(page);

    // Make sure we have enough free space for the new record.
    assert(slottedFreespaceForSlot(page, rid.slot) >= physical_slot_length(rid.size));
  }

  freeSpace = *freespace_ptr(page);

  // Remove this entry from the freelist (if necessary).  slottedCompact
  // assumes that this does not change the order of items in the list;
  // if it did, then slottedCompact could leak slot ids (or worse!).
  if(rid.slot < *numslots_ptr(page) && *slot_ptr(page,rid.slot) == INVALID_SLOT) {
    short next = *freelist_ptr(page);
    short last = INVALID_SLOT;
    // special case: is the slot physically before us the predecessor?
    if(rid.slot > 0) {
      if(*slot_length_ptr(page, rid.slot-1) == rid.slot && *slot_ptr(page, rid.slot-1) == INVALID_SLOT) {
        next = rid.slot;
        last = rid.slot-1;
      }
    }
    while(next != INVALID_SLOT && next != rid.slot) {
      last = next;
      assert(next < *numslots_ptr(page));
      short next_slot_ptr = *slot_ptr(page, next);
      assert(next_slot_ptr == INVALID_SLOT);
      next = *slot_length_ptr(page, next);
    }
    if(next == rid.slot) {
      if(last == INVALID_SLOT) {
        *freelist_ptr(page) = *slot_length_ptr(page, rid.slot);
      } else {
        *slot_length_ptr(page, last) = *slot_length_ptr(page, rid.slot);
      }
    }
  }

  // Insert any slots that come between the previous numslots_ptr()
  // and the slot we're allocating onto the freelist.  In order to
  // promote the reuse of free slot numbers, we go out of our way to make sure
  // that we put them in the list in increasing order.  (Note: slottedCompact's
  // correctness depends on this behavior!)

  if(rid.slot > *numslots_ptr(page)) {
    short lastSlot;
    short numSlots = *numslots_ptr(page);
    if(*freelist_ptr(page) == INVALID_SLOT) {

      *freelist_ptr(page) = numSlots;
      lastSlot = numSlots;

      *slot_ptr(page, lastSlot) = INVALID_SLOT;
      // will set slot_length_ptr on next iteration.

      (*numslots_ptr(page))++;
    } else {
      lastSlot = INVALID_SLOT;
      short next = *freelist_ptr(page);
      while(next != INVALID_SLOT) {
        lastSlot = next;
        next = *slot_length_ptr(page, lastSlot);
        assert(lastSlot < *numslots_ptr(page));
        assert(*slot_ptr(page, lastSlot) == INVALID_SLOT);
      }
      *slot_ptr(page, lastSlot) = INVALID_SLOT;
    }

    // lastSlot now contains the tail of the free list.  We can start adding slots to the list starting at *numslots_ptr.

    while(*numslots_ptr(page) < rid.slot) {
      *slot_length_ptr(page, lastSlot) = *numslots_ptr(page);
      lastSlot = *numslots_ptr(page);
      *slot_ptr(page, lastSlot) = INVALID_SLOT;
      (*numslots_ptr(page))++;
    }

    // Terminate the end of the list.
    assert(lastSlot < *numslots_ptr(page));
    *slot_length_ptr(page, lastSlot) = INVALID_SLOT;
  }

  if(*numslots_ptr(page) == rid.slot) {
    *numslots_ptr(page) = rid.slot+1;
  }

  assert(*numslots_ptr(page) > rid.slot);

  DEBUG("Num slots %d\trid.slot %d\n", *numslots_ptr(page), rid.slot);

  // Reserve space for this record and record the space's offset in
  // the slot header.

  assert(rid.slot < *numslots_ptr(page));
  *freespace_ptr(page) = freeSpace + physical_slot_length(rid.size);
  *slot_ptr(page, rid.slot) = freeSpace;

  *slot_length_ptr(page, rid.slot) = rid.size;
}
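/* Tracing the scenario from the comment above really_do_ralloc() (numbers
   assumed for illustration): with *numslots_ptr(page) == 10 and
   rid.slot == 12, the slot-extension loop pushes slots 10 and 11 onto the
   freelist in increasing order, and *numslots_ptr(page) ends up at 13, so
   slot 12 becomes valid. */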
/**
   @param xid
   @param page
   @param lsn
   @param rid with user-visible size.

   @todo Does this still really need to check for BLOB_THRESHOLD_SIZE?
   Shouldn't the caller call slottedSetType if necessary?
*/
recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {

  writelock(page->rwlatch, 376);

  if(rid.size >= BLOB_THRESHOLD_SIZE) {
    rid.size = BLOB_SLOT;
  }

  if(*page_type_ptr(page) != SLOTTED_PAGE && *page_type_ptr(page) != BOUNDARY_TAG_PAGE) {
    /* slottedPreRalloc calls this when necessary.  However, in
       the case of a crash, it is possible that
       slottedPreRalloc's updates were lost, so we need to check
       for that here.

       If slottedPreRalloc didn't call slottedPageInitialize,
       then there would be a race condition:

       Thread 1               Thread 2
       preAlloc(big record)

                              preAlloc(big record) // Should check the freespace of the page and fail
                              postAlloc(big record)

       postAlloc(big record)  // Thread 2 stole my space! => Crash?

       Note that this _will_ cause trouble if recovery is
       multi-threaded, and allows the application to begin
       updating the storefile without first locking any pages
       that suffer from this problem.

       Also, this only works because pages that are of type
       BOUNDARY_TAG_PAGE are guaranteed to have their page type
       set before recovery calls this function.
    */

    slottedPageInitialize(page);
  }
  fsckSlottedPage(page);

  if(*slot_ptr(page, rid.slot) == INVALID_SLOT || rid.slot >= *numslots_ptr(page)) {
    really_do_ralloc(page, rid);
  } else {
    // Check to see that the slot happens to be the right size,
    // so we are (hopefully) just overwriting a slot with
    // itself.  This can happen under normal operation, since
    // really_do_ralloc() must be called before and after the
    // log entry is generated.  (See comment above...)

    assert(rid.size == *slot_length_ptr(page, rid.slot));
  }

  pageWriteLSN(xid, page, lsn);
  fsckSlottedPage(page);
  writeunlock(page->rwlatch);

  return rid;
}
void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
  writelock(page->rwlatch, 443);
  fsckSlottedPage(page);

  if(*freespace_ptr(page) == *slot_ptr(page, rid.slot) + physical_slot_length(rid.size)) {
    (*freespace_ptr(page)) -= physical_slot_length(rid.size);
  }

  assert(rid.slot < *numslots_ptr(page));
  if(rid.slot == *numslots_ptr(page)-1) {
    (*numslots_ptr(page))--;
  } else {
    *slot_ptr(page, rid.slot) = INVALID_SLOT;
    *slot_length_ptr(page, rid.slot) = *freelist_ptr(page);
    *freelist_ptr(page) = rid.slot;
  }

  pageWriteLSN(xid, page, lsn);
  fsckSlottedPage(page);
  unlock(page->rwlatch);
}
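/* Illustrative effect (slot numbers invented for exposition): deallocating
   slot 4 on a page whose freelist head is 9 sets
   *slot_ptr(page, 4) = INVALID_SLOT, *slot_length_ptr(page, 4) = 9, and
   *freelist_ptr(page) = 4 -- the slot is pushed onto the head of the
   freelist.  Deallocating the highest-numbered slot instead just decrements
   *numslots_ptr(page). */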
void slottedReadUnlocked(Page * page, recordid rid, byte *buff) {
  assertlocked(page->rwlatch);
  int slot_length;

  fsckSlottedPage(page);
  assert(page->id == rid.page);
  slot_length = *slot_length_ptr(page, rid.slot);
  assert(rid.size == slot_length);

  if(!memcpy(buff, record_ptr(page, rid.slot), physical_slot_length(rid.size))) {
    perror("memcpy");
    abort();
  }
  fsckSlottedPage(page);
}
void slottedRead(Page * page, recordid rid, byte *buff) {

  int slot_length;
  readlock(page->rwlatch, 519);
  fsckSlottedPage(page);

  //  printf("Reading from rid = {%d,%d,%d (%d)}\n", rid.page, rid.slot, rid.size, physical_slot_length(rid.size));

  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert(rid.size == slot_length);

  if(!memcpy(buff, record_ptr(page, rid.slot), physical_slot_length(rid.size))) {
    perror("memcpy");
    abort();
  }

  fsckSlottedPage(page);
  unlock(page->rwlatch);
}
void slottedWrite(Page * page, recordid rid, const byte *data) {
  readlock(page->rwlatch, 529);

  slottedWriteUnlocked(page, rid, data);

  unlock(page->rwlatch);
}
void slottedWriteUnlocked(Page * page, recordid rid, const byte *data) {
  assertlocked(page->rwlatch);
  int slot_length;
  fsckSlottedPage(page);

  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert(rid.size == slot_length);

  if(!memcpy(record_ptr(page, rid.slot), data, physical_slot_length(rid.size))) {
    perror("memcpy");
    abort();
  }
  fsckSlottedPage(page);
}
|