/** $Id$ */

#include "../page.h"
#include "../blobManager.h" /** So that we can call sizeof(blob_record_t) */
#include "slotted.h"

#include <assert.h>

/* ------------------ STATIC FUNCTIONS.  NONE OF THESE ACQUIRE LOCKS
   ON THE MEMORY THAT IS PASSED INTO THEM ------------------ */

static void __really_do_ralloc(Page * page, recordid rid);

/**
   Move all of the records to the beginning of the page in order to
   increase the available free space.

   The caller of this function must have a writelock on the page.
*/
void slottedCompact(Page * page) {

  short i;

  Page bufPage;
  byte buffer[PAGE_SIZE];

  short numSlots;
  size_t meta_size;

  bufPage.id = -1;
  bufPage.memAddr = buffer;

  /* Can't compact in place; slot numbers can come in different orders than
     the physical space allocated to them. */

  memset(buffer, -1, PAGE_SIZE);

  meta_size = (((size_t)page->memAddr) + PAGE_SIZE) - (size_t)end_of_usable_space_ptr(page);

  memcpy(buffer + PAGE_SIZE - meta_size, page->memAddr + PAGE_SIZE - meta_size, meta_size);

  slottedPageInitialize(&bufPage);

  numSlots = *numslots_ptr(page);
  for (i = 0; i < numSlots; i++) {
    if (isValidSlot(page, i)) {
      recordid rid;

      rid.page = -1;
      rid.slot = i;
      rid.size = *slot_length_ptr(page, i);

      __really_do_ralloc(&bufPage, rid);

      memcpy(record_ptr(&bufPage, rid.slot), record_ptr(page, rid.slot), rid.size);

    } else {
      *slot_ptr(&bufPage, i) = INVALID_SLOT;
      *slot_length_ptr(&bufPage, i) = *freelist_ptr(page);
      *freelist_ptr(page) = i;
    }
  }

  /** The freelist could potentially run past the end of the
      space that is allocated for slots (this would happen if
      the number of slots needed by this page just decreased).
      If we let the list run outside of that area, it could
      cause inadvertent page corruption.  Therefore, we need to
      truncate the list before continuing. */

  short next = *freelist_ptr(page);
  while(next >= numSlots && next != INVALID_SLOT) {
    next = *slot_length_ptr(page, next);
  }

  *freelist_ptr(page) = next;

  memcpy(page->memAddr, buffer, PAGE_SIZE);
}
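
/* Worked example (illustrative, not part of the build): on a page whose
   slots were allocated as 0, 1, 2 and whose slot 1 was then freed, record
   2's bytes still sit after the hole record 1 left behind.  Compaction
   copies the surviving records into bufPage back-to-back:

     before:  [rec0][hole from rec1][rec2][--free--]
     after:   [rec0][rec2][-------free--------]

   Slot numbers are preserved (slot 2 still names rec2); only the physical
   offsets stored in the slot headers change.  This is why the copy above
   goes through __really_do_ralloc rather than a plain memmove. */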

/**
   Invariant: This lock should be held while updating lastFreepage, or
   while performing any operation that may decrease the amount of
   freespace in the page that lastFreepage refers to.

   Since pageCompact and slottedDeRalloc may only increase this value,
   they do not need to hold this lock.  Since bufferManager is the
   only place where rawPageRallocSlot is called, rawPageRallocSlot does not obtain
   this lock.

   If you are calling rawPageRallocSlot on a page that may be the page
   lastFreepage refers to, then you will need to acquire
   lastFreepage_mutex.  (Doing so from outside of slotted.c is almost
   certainly asking for trouble, so lastFreepage_mutex is static.)
*/

/*static pthread_mutex_t lastFreepage_mutex; */
static unsigned int lastFreepage = -10;

void slottedPageInit() {
  /*pthread_mutex_init(&lastFreepage_mutex, NULL); */
  lastFreepage = -1;
}

void slottedPageDeinit() {
  /*pthread_mutex_destroy(&lastFreepage_mutex); */
}

void slottedPageInitialize(Page * page) {
  memset(page->memAddr, 0, PAGE_SIZE);
  *page_type_ptr(page) = SLOTTED_PAGE;
  *freespace_ptr(page) = 0;
  *numslots_ptr(page)  = 0;
  *freelist_ptr(page)  = INVALID_SLOT;
}
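
/* Layout sketch (illustrative; the authoritative accessor definitions live
   in page.h, and the exact ordering of the header fields is not spelled
   out here):

     low addresses                                            high addresses
     [ record data grows up --> ][ ...gap... ][ <-- slot headers grow down ][ page header ]

   *freespace_ptr(page) holds the offset of the first unallocated data
   byte, *numslots_ptr(page) counts slot headers, and *freelist_ptr(page)
   heads a linked list of free slot numbers threaded through
   slot_length_ptr (see slottedRawRalloc / slottedDeRalloc below).  The
   gap between the record data and the slot headers is the value that
   slottedFreespaceUnlocked() reports. */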

/** @todo Implement a model of slotted pages in the test scripts, and
    then write a randomized test that confirms the model matches the
    implementation's behavior. */
size_t slottedFreespaceUnlocked(Page * page) {
  // end_of_free_space points to the beginning of the next slot header (if we were to allocate one).
  byte* end_of_free_space = (byte*)slot_length_ptr(page, *numslots_ptr(page)-1);
  // start_of_free_space points to the first unallocated byte in the page
  // (ignoring space that could be reclaimed by compaction).
  byte* start_of_free_space = (byte*)(page->memAddr + *freespace_ptr(page));
  // We need the "+ SLOTTED_PAGE_OVERHEAD_PER_RECORD" because the regions they cover could overlap.
  assert(end_of_free_space + SLOTTED_PAGE_OVERHEAD_PER_RECORD >= start_of_free_space);

  if(end_of_free_space < start_of_free_space) {
    // The regions overlap; there is no free space.
    return 0;
  } else {
    // The regions do not overlap.  There might be free space.
    return (size_t) (end_of_free_space - start_of_free_space);
  }
}

int slottedFreespace(Page * page) {
  int ret;
  readlock(page->rwlatch, 292);
  ret = slottedFreespaceUnlocked(page);
  readunlock(page->rwlatch);
  return ret;
}

/** @todo slottedPreRalloc ignores its xid parameter; change the
    interface?  (The xid is there for now, in case it allows some
    optimizations later.  Perhaps it's better to cluster allocations
    from the same xid on the same page, or something...)

    @todo slottedPreRalloc should understand deadlock, and try another page if deadlock occurs.

    @todo need to obtain (transaction-level) write locks _before_ writing log entries.  Otherwise, we can deadlock at recovery.
*/
compensated_function recordid slottedPreRalloc(int xid, long size, Page ** pp) {

  recordid ret;

  int isBlob = 0;

  if(size == BLOB_SLOT) {
    isBlob = 1;
    size = sizeof(blob_record_t);
  }

  assert(size < BLOB_THRESHOLD_SIZE);

  /** @todo is ((unsigned int) foo) == -1 portable?  Gotta love C. */

  if(lastFreepage == -1) {
    try_ret(NULLRID) {
      lastFreepage = TpageAlloc(xid);
    } end_ret(NULLRID);
    try_ret(NULLRID) {
      *pp = loadPage(xid, lastFreepage);
    } end_ret(NULLRID);
    assert(*page_type_ptr(*pp) == UNINITIALIZED_PAGE);
    slottedPageInitialize(*pp);
  } else {
    try_ret(NULLRID) {
      *pp = loadPage(xid, lastFreepage);
    } end_ret(NULLRID);
  }

  if(slottedFreespace(*pp) < size) {
    releasePage(*pp);
    try_ret(NULLRID) {
      lastFreepage = TpageAlloc(xid);
    } end_ret(NULLRID);
    try_ret(NULLRID) {
      *pp = loadPage(xid, lastFreepage);
    } end_ret(NULLRID);
    slottedPageInitialize(*pp);
  }

  ret = slottedRawRalloc(*pp, size);

  if(isBlob) {
    *slot_length_ptr(*pp, ret.slot) = BLOB_SLOT;
  }

  DEBUG("alloced rid = {%d, %d, %ld}\n", ret.page, ret.slot, ret.size);

  return ret;
}
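
/* Hedged usage sketch (compiled out): how the two-phase allocation protocol
   above is meant to be driven by a caller that also writes a log entry.
   The log-append step is elided; `xid` and the resulting `lsn` come from
   the transaction and log layers, which are outside this file. */
#if 0
void example_alloc(int xid) {
  Page * p;
  recordid rid = slottedPreRalloc(xid, sizeof(int), &p);
  lsn_t lsn = /* ... append an allocation record to the log ... */ 0;
  slottedPostRalloc(xid, p, lsn, rid);  /* idempotent; re-run at recovery */
  releasePage(p);
}
#endif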

compensated_function recordid slottedPreRallocFromPage(int xid, long page, long size, Page **pp) {
  int isBlob = 0;
  if(size == BLOB_SLOT) {
    isBlob = 1;
    size = sizeof(blob_record_t);
  }
  try_ret(NULLRID) {
    *pp = loadPage(xid, page);
  } end_ret(NULLRID);
  if(slottedFreespace(*pp) < size) {
    releasePage(*pp);
    *pp = NULL;
    return NULLRID;
  }

  if(*page_type_ptr(*pp) == UNINITIALIZED_PAGE) {
    slottedPageInitialize(*pp);
  }
  assert(*page_type_ptr(*pp) == SLOTTED_PAGE);
  recordid ret = slottedRawRalloc(*pp, size);
  assert(ret.size == size);
  if(isBlob) {
    *slot_length_ptr(*pp, ret.slot) = BLOB_SLOT;
  }
  return ret;
}

recordid slottedRawRalloc(Page * page, int size) {

  writelock(page->rwlatch, 342);

  recordid rid;

  rid.page = page->id;
  rid.slot = *numslots_ptr(page);
  rid.size = size;

  /* freelist_ptr points to the first free slot number, which is the head
     of a linked list of free slot numbers. */
  if(*freelist_ptr(page) != INVALID_SLOT) {
    rid.slot = *freelist_ptr(page);
    *freelist_ptr(page) = *slot_length_ptr(page, rid.slot);
    *slot_length_ptr(page, rid.slot) = 0;
  }

  __really_do_ralloc(page, rid);

  assert(slottedFreespaceUnlocked(page) >= 0);

  writeunlock(page->rwlatch);

  return rid;
}
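
/* Illustrative freelist walk-through (not part of the build): suppose slots
   1 and 3 are deallocated, in that order.  slottedDeRalloc (below) pushes
   each onto the list head, so the page then encodes:

     *freelist_ptr(page)        == 3
     *slot_length_ptr(page, 3)  == 1
     *slot_length_ptr(page, 1)  == INVALID_SLOT

   The next slottedRawRalloc pops the head: it hands out slot 3 and sets
   *freelist_ptr(page) to 1.  Slot numbers double as list links, which is
   why slot_length_ptr is reused to store the "next" pointer while a slot
   is free. */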

static void __really_do_ralloc(Page * page, recordid rid) {

  short freeSpace;

  int isBlob = 0;

  if(rid.size == BLOB_SLOT) {
    isBlob = 1;
    rid.size = sizeof(blob_record_t);
  }

  assert(rid.size > 0);

  if(slottedFreespaceUnlocked(page) < rid.size) {
    slottedCompact(page);

    /* Make sure there's enough free space... */
    assert(slottedFreespaceUnlocked(page) >= rid.size);
  }

  freeSpace = *freespace_ptr(page);

  if(*numslots_ptr(page) <= rid.slot) {
    *numslots_ptr(page) = rid.slot + 1;
  }

  DEBUG("Num slots %d\trid.slot %d\n", *numslots_ptr(page), rid.slot);

  *freespace_ptr(page) = freeSpace + rid.size;

  *slot_ptr(page, rid.slot) = freeSpace;

  if(isBlob) {
    *slot_length_ptr(page, rid.slot) = BLOB_SLOT;
  } else {
    *slot_length_ptr(page, rid.slot) = rid.size;
  }

  assert(slottedFreespaceUnlocked(page) >= 0);

}

recordid slottedPostRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {

  writelock(page->rwlatch, 376);

  if(*page_type_ptr(page) != SLOTTED_PAGE) {
    /* slottedPreRalloc calls this when necessary.  However, in
       the case of a crash, it is possible that
       slottedPreRalloc's updates were lost, so we need to check
       for that here.

       If slottedPreRalloc didn't call slottedPageInitialize,
       then there would be a race condition:

       Thread 1                Thread 2
       preAlloc(big record)

                               preAlloc(big record) // Should check the freespace of the page and fail
                               postAlloc(big record)

       postAlloc(big record)   // Thread 2 stole my space! => Crash?

       Note that this _will_ cause trouble if recovery is
       multi-threaded, and allows the application to begin
       updating the storefile without first locking any pages
       that suffer from this problem.
    */

    slottedPageInitialize(page);
  }

  if((*slot_length_ptr(page, rid.slot) == 0) || (*slot_ptr(page, rid.slot) == INVALID_SLOT)) {

    __really_do_ralloc(page, rid);

  } else {

    assert((rid.size == *slot_length_ptr(page, rid.slot)) ||
           (*slot_length_ptr(page, rid.slot) >= PAGE_SIZE));

  }

  pageWriteLSN(xid, page, lsn);

  writeunlock(page->rwlatch);

  return rid;
}

void slottedDeRalloc(int xid, Page * page, lsn_t lsn, recordid rid) {
  writelock(page->rwlatch, 443);
  size_t oldFreeLen = slottedFreespaceUnlocked(page);
  *slot_ptr(page, rid.slot) = INVALID_SLOT;
  *slot_length_ptr(page, rid.slot) = *freelist_ptr(page);
  *freelist_ptr(page) = rid.slot;

  pageWriteLSN(xid, page, lsn);
  size_t newFreeLen = slottedFreespaceUnlocked(page);
  assert(oldFreeLen <= newFreeLen);
  unlock(page->rwlatch);
}
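
/* Hedged sketch (compiled out): deallocation pushes the slot onto the
   page's freelist, so an immediately following allocation on the same page
   gets the same slot number back.  `xid`, `lsn`, and the page come from the
   caller, as above; this assumes an ordinary (non-blob) record and no
   intervening allocations by other threads. */
#if 0
void example_reuse(int xid, Page * p, lsn_t lsn, recordid rid) {
  slottedDeRalloc(xid, p, lsn, rid);       /* rid.slot becomes the freelist head */
  recordid rid2 = slottedRawRalloc(p, rid.size);
  assert(rid2.slot == rid.slot);           /* the freed slot is handed back first */
}
#endif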

void slottedReadUnlocked(int xid, Page * page, recordid rid, byte *buff) {
  int slot_length;

  assert(page->id == rid.page);
  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (rid.size == BLOB_SLOT && slot_length == sizeof(blob_record_t)) || (slot_length >= PAGE_SIZE));

  if(!memcpy(buff, record_ptr(page, rid.slot), rid.size)) {
    perror("memcpy");
    abort();
  }
}

/*
  This should trust the rid (since the caller needs to
  override the size in special circumstances).

  @todo If the rid size has been overridden, we should check to make
  sure that this really is a special record.
*/
void slottedRead(int xid, Page * page, recordid rid, byte *buff) {

  int slot_length;
  readlock(page->rwlatch, 519);

  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (rid.size == BLOB_SLOT && slot_length == sizeof(blob_record_t)) || (slot_length >= PAGE_SIZE));

  if(!memcpy(buff, record_ptr(page, rid.slot), rid.size)) {
    perror("memcpy");
    abort();
  }

  unlock(page->rwlatch);
}

void slottedWrite(int xid, Page * page, lsn_t lsn, recordid rid, const byte *data) {
  int slot_length;

  readlock(page->rwlatch, 529);

  assert(rid.size < PAGE_SIZE);
  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (rid.size == BLOB_SLOT && slot_length == sizeof(blob_record_t)) || (slot_length >= PAGE_SIZE));

  if(!memcpy(record_ptr(page, rid.slot), data, rid.size)) {
    perror("memcpy");
    abort();
  }

  /* Note: lsn is not applied to the page here; stamping the page with
     pageWriteLSN is the caller's responsibility. */
  unlock(page->rwlatch);
}

void slottedWriteUnlocked(int xid, Page * page, lsn_t lsn, recordid rid, const byte *data) {
  int slot_length;

  assert(rid.size < PAGE_SIZE);
  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (rid.size == BLOB_SLOT && slot_length == sizeof(blob_record_t)) || (slot_length >= PAGE_SIZE));

  if(!memcpy(record_ptr(page, rid.slot), data, rid.size)) {
    perror("memcpy");
    abort();
  }
}

/*void slottedSetType(Page * p, int slot, int type) {
  assert(type > PAGE_SIZE);
  writelock(p->rwlatch, 686);
  *slot_length_ptr(p, slot) = type;
  unlock(p->rwlatch);
}

int slottedGetType(Page * p, int slot) {
  int ret;
  readlock(p->rwlatch, 693);
  ret = *slot_length_ptr(p, slot);
  unlock(p->rwlatch);

  / * getSlotType does the locking for us. * /
  return ret > PAGE_SIZE ? ret : NORMAL_SLOT;
}*/