2004-07-31 00:27:55 +00:00
|
|
|
/** $Id$ */
|
|
|
|
|
|
|
|
|
2004-07-30 01:28:39 +00:00
|
|
|
#include "../page.h"
|
2004-08-21 00:03:30 +00:00
|
|
|
/*#include "../blobManager.h" */
|
2004-07-30 01:28:39 +00:00
|
|
|
#include "slotted.h"
|
|
|
|
#include <assert.h>
|
|
|
|
|
|
|
|
/* ------------------ STATIC FUNCTIONS. NONE OF THESE ACQUIRE LOCKS
|
|
|
|
ON THE MEMORY THAT IS PASSED INTO THEM -------------*/
|
|
|
|
|
|
|
|
static void __really_do_ralloc(Page * page, recordid rid) ;
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
|
|
|
Move all of the records to the beginning of the page in order to
|
|
|
|
increase the available free space.
|
|
|
|
|
|
|
|
The caller of this function must have a writelock on the page.
|
|
|
|
*/
|
2004-08-17 01:46:17 +00:00
|
|
|
static void slottedCompact(Page * page) {
|
2004-07-30 01:28:39 +00:00
|
|
|
|
|
|
|
int i;
|
|
|
|
Page bufPage;
|
|
|
|
byte buffer[PAGE_SIZE];
|
|
|
|
|
|
|
|
int numSlots;
|
|
|
|
int meta_size;
|
|
|
|
|
|
|
|
bufPage.id = -1;
|
|
|
|
bufPage.memAddr = buffer;
|
|
|
|
|
|
|
|
/* Can't compact in place, slot numbers can come in different orders than
|
|
|
|
the physical space allocated to them. */
|
|
|
|
|
|
|
|
memset(buffer, -1, PAGE_SIZE);
|
|
|
|
|
|
|
|
meta_size = (((int)page->memAddr) + PAGE_SIZE ) - (int)end_of_usable_space_ptr(page);
|
|
|
|
/* *slot_length_ptr(page, (*numslots_ptr(page))-1);*/
|
|
|
|
|
|
|
|
memcpy(buffer + PAGE_SIZE - meta_size, page->memAddr + PAGE_SIZE - meta_size, meta_size);
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
slottedPageInitialize(&bufPage);
|
2004-07-30 01:28:39 +00:00
|
|
|
|
|
|
|
numSlots = *numslots_ptr(page);
|
|
|
|
for (i = 0; i < numSlots; i++) {
|
2004-12-06 01:20:48 +00:00
|
|
|
/* ("i = %d\n", i); */
|
2004-07-30 01:28:39 +00:00
|
|
|
if (isValidSlot(page, i)) {
|
|
|
|
/* printf("copying %d\n", i);
|
|
|
|
fflush(NULL); */
|
|
|
|
/* DEBUG("Buffer offset: %d\n", freeSpace); */
|
|
|
|
recordid rid;
|
|
|
|
|
|
|
|
rid.page = -1;
|
|
|
|
rid.slot = i;
|
|
|
|
rid.size = *slot_length_ptr(page, i);
|
|
|
|
|
|
|
|
__really_do_ralloc(&bufPage, rid);
|
|
|
|
|
|
|
|
memcpy(record_ptr(&bufPage, rid.slot), record_ptr(page, rid.slot), rid.size);
|
|
|
|
|
|
|
|
} else {
|
|
|
|
*slot_ptr(&bufPage, i) = INVALID_SLOT;
|
|
|
|
*slot_length_ptr(&bufPage, i) = *freelist_ptr(page);
|
|
|
|
*freelist_ptr(page) = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-07-30 02:07:41 +00:00
|
|
|
/** The freelist could potentially run past the end of the
|
|
|
|
space that is allocated for slots (this would happen if
|
|
|
|
the number of slots needed by this page just decreased.
|
|
|
|
If we let the list run outside of that area, it could
|
|
|
|
cause inadvertant page corruption. Therefore, we need to
|
|
|
|
truncate the list before continuing. */
|
|
|
|
|
|
|
|
short next = *freelist_ptr(page);
|
|
|
|
while(next >= numSlots) {
|
|
|
|
next = *slot_length_ptr(page, next);
|
|
|
|
}
|
|
|
|
|
|
|
|
*freelist_ptr(page) = next;
|
|
|
|
|
2004-07-30 01:28:39 +00:00
|
|
|
/* Rebuild the freelist. */
|
|
|
|
|
|
|
|
/* *freelist_ptr(&bufPage) = 0;
|
|
|
|
for (i = 0; i < numSlots; i++) {
|
|
|
|
if (!isValidSlot(&bufPage, i)) {
|
|
|
|
*slot_length_ptr(&bufPage, i) = *freelist_ptr(&bufPage);
|
|
|
|
*freelist_ptr(&bufPage) = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
|
|
|
memcpy(page->memAddr, buffer, PAGE_SIZE);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
/**
|
|
|
|
Invariant: This lock should be held while updating lastFreepage, or
|
|
|
|
while performing any operation that may decrease the amount of
|
|
|
|
freespace in the page that lastFreepage refers to.
|
|
|
|
|
|
|
|
Since pageCompact and slottedDeRalloc may only increase this value,
|
|
|
|
they do not need to hold this lock. Since bufferManager is the
|
|
|
|
only place where rawPageRallocSlot is called, rawPageRallocSlot does not obtain
|
|
|
|
this lock.
|
|
|
|
|
|
|
|
If you are calling rawPageRallocSlot on a page that may be the page
|
|
|
|
lastFreepage refers to, then you will need to acquire
|
|
|
|
lastFreepage_mutex. (Doing so from outside of slotted.c is almost
|
|
|
|
certainly asking for trouble, so lastFreepage_mutex is static.)
|
|
|
|
|
|
|
|
*/
|
2004-10-06 06:08:09 +00:00
|
|
|
|
|
|
|
|
|
|
|
/*static pthread_mutex_t lastFreepage_mutex; */
|
2004-12-06 01:20:48 +00:00
|
|
|
static unsigned int lastFreepage = -10;
|
2004-08-17 01:46:17 +00:00
|
|
|
|
|
|
|
void slottedPageInit() {
|
2004-10-06 06:08:09 +00:00
|
|
|
/*pthread_mutex_init(&lastFreepage_mutex , NULL); */
|
2004-08-17 01:46:17 +00:00
|
|
|
lastFreepage = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** One-time module teardown.  Currently a no-op; the mutex destroy is
    disabled along with the mutex itself. */
void slottedPageDeinit() {
  /* pthread_mutex_destroy(&lastFreepage_mutex); */
}
|
|
|
|
|
|
|
|
|
|
|
|
/** Format an in-memory page as an empty slotted page: zero the whole
    page, then stamp the header fields (type tag, empty slot table,
    empty freelist). */
void slottedPageInitialize(Page * page) {
  memset(page->memAddr, 0, PAGE_SIZE);

  *page_type_ptr(page) = SLOTTED_PAGE;
  *numslots_ptr(page)  = 0;
  *freespace_ptr(page) = 0;
  *freelist_ptr(page)  = INVALID_SLOT;
}
|
|
|
|
|
2004-10-19 21:16:37 +00:00
|
|
|
/** Return the number of contiguous free bytes between the end of the
    record area and the start of the slot table.  Caller must hold the
    page latch (no locking is done here). */
int slottedFreespaceUnlocked(Page * page) {
  /* Gap = (address just past the slot table) - (end of record data).
     Computed as a byte-pointer difference; the old code cast both
     pointers to int, which truncates addresses on 64-bit platforms. */
  return (int)((byte*)slot_length_ptr(page, *numslots_ptr(page)) -
               (page->memAddr + *freespace_ptr(page)));
}
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
|
|
|
|
/** Latched wrapper around slottedFreespaceUnlocked(): takes the page's
    read latch, samples the free-space count, and releases the latch. */
int slottedFreespace(Page * page) {
  int space;

  readlock(page->rwlatch, 292);
  space = slottedFreespaceUnlocked(page);
  readunlock(page->rwlatch);

  return space;
}
|
|
|
|
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
/** @todo slottedPreRalloc ignores its xid parameter; change the
    interface?  (The xid is there for now, in case it allows some
    optimizations later.  Perhaps it's better to cluster allocations
    from the same xid on the same page, or something...)

    Reserves space for a record of 'size' bytes, returning the record id
    and (through *pp) the pinned page it was placed on.  Remembers the
    last page allocated from in the file-static 'lastFreepage' and keeps
    filling it until it runs out of room.

    NOTE(review): 'lastFreepage' is shared mutable state and the mutex
    protecting it is commented out, so this is not thread safe as
    written — confirm callers serialize allocation.
*/
recordid slottedPreRalloc(int xid, long size, Page ** pp) {

  recordid ret;

  /* DEBUG("Rallocing record of size %ld\n", (long int)size); */

  /* Records this large must go through the blob interface instead. */
  assert(size < BLOB_THRESHOLD_SIZE);

  /* pthread_mutex_lock(&lastFreepage_mutex); */
  /** @todo is ((unsigned int) foo) == -1 portable? Gotta love C.*/
  if(lastFreepage == -1) {
    /* First allocation since slottedPageInit(): grab and format a
       brand-new page. */
    lastFreepage = TpageAlloc(xid/*, SLOTTED_PAGE*/);
    *pp = loadPage(lastFreepage);
    assert(*page_type_ptr(*pp) == UNINITIALIZED_PAGE);
    slottedPageInitialize(*pp);
  } else {
    /* Reuse the cached page. */
    *pp = loadPage(lastFreepage);
  }

  if(slottedFreespace(*pp) < size ) {
    /* Cached page is too full for this record: release it and start a
       fresh page. */
    releasePage(*pp);
    lastFreepage = TpageAlloc(xid/*, SLOTTED_PAGE*/);
    *pp = loadPage(lastFreepage);
    slottedPageInitialize(*pp);
  }

  ret = slottedRawRalloc(*pp, size);

  /* releasePage(p); */ /* This gets called in Talloc() now.  That prevents the page from being prematurely stolen. */

  /* pthread_mutex_unlock(&lastFreepage_mutex); */

  DEBUG("alloced rid = {%d, %d, %ld}\n", ret.page, ret.slot, ret.size);

  return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/** Allocate a slot of 'size' bytes on this page, recycling a slot
    number from the page's freelist when one is available, otherwise
    extending the slot table.  Takes the page's write latch for the
    duration; the physical space reservation is delegated to
    __really_do_ralloc(). */
recordid slottedRawRalloc(Page * page, int size) {

  writelock(page->rwlatch, 342);

  recordid rid;

  rid.page = page->id;
  rid.slot = *numslots_ptr(page);  /* default: brand-new slot number */
  rid.size = size;

  /* new way - The freelist_ptr points to the first free slot number, which
     is the head of a linked list of free slot numbers.*/
  if(*freelist_ptr(page) != INVALID_SLOT) {
    /* Pop the freelist head: a free slot stores the next free slot
       number in its length field. */
    rid.slot = *freelist_ptr(page);
    *freelist_ptr(page) = *slot_length_ptr(page, rid.slot);
    *slot_length_ptr(page, rid.slot) = 0;
  }

  __really_do_ralloc(page, rid);

  /* DEBUG("slot: %d freespace: %d\n", rid.slot, freeSpace); */

  writeunlock(page->rwlatch);

  return rid;
}
|
|
|
|
|
|
|
|
/** Physically reserve rid.size bytes for slot rid.slot on the page,
    compacting the page first if there is not enough contiguous free
    space.  Grows the slot table if rid.slot has never been used.
    Acquires no latches — the caller must already hold the page's
    write latch (see the note at the top of this file). */
static void __really_do_ralloc(Page * page, recordid rid) {

  int freeSpace;

  assert(rid.size > 0);

  if(slottedFreespaceUnlocked(page) < rid.size) {
    slottedCompact(page);

    /* Make sure there's enough free space... */
    assert (slottedFreespaceUnlocked(page) >= rid.size);
  }

  freeSpace = *freespace_ptr(page);

  /* Extend the slot table if this slot number is past its current end. */
  if(*numslots_ptr(page) <= rid.slot) {
    /* printf("Incrementing numSlots."); */
    *numslots_ptr(page) = rid.slot + 1;
  }

  DEBUG("Num slots %d\trid.slot %d\n", *numslots_ptr(page), rid.slot);

  /* Carve the record out of the front of the contiguous free region:
     bump the free-space watermark and point the slot at the old one. */
  *freespace_ptr(page) = freeSpace + rid.size;

  *slot_ptr(page, rid.slot) = freeSpace;
  /* assert(!*slot_length_ptr(page, rid.slot) || (-1 == *slot_length_ptr(page, rid.slot)));*/
  *slot_length_ptr(page, rid.slot) = rid.size;

}
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
/** Redo phase of record allocation: ensure rid is physically allocated
    on the page (re-doing the allocation if it was lost in a crash),
    then stamp the page with the operation's LSN.  Takes the page's
    write latch for the duration. */
recordid slottedPostRalloc(Page * page, lsn_t lsn, recordid rid) {

  writelock(page->rwlatch, 376);

  if(*page_type_ptr(page) != SLOTTED_PAGE) {
    /* slottedPreRalloc calls this when necessary.  However, in
       the case of a crash, it is possible that
       slottedPreRalloc's updates were lost, so we need to check
       for that here.

       If slottedPreRalloc didn't call slottedPageInitialize,
       then there would be a race condition:

       Thread 1                Thread 2
       preAlloc(big record)

                               preAlloc(big record) // Should check the freespace of the page and fail

       postAlloc(big record)

                               postAlloc(big record) // Thread 2 stole my space! => Crash?

       Note that this _will_ cause trouble if recovery is
       multi-threaded, and allows the application to begin
       updating the storefile without first locking any pages
       that suffer from this problem.

    */

    slottedPageInitialize(page);
  }

  if((*slot_length_ptr(page, rid.slot) == 0) || (*slot_ptr(page, rid.slot) == INVALID_SLOT)) {
    /* if(*slot_ptr(page, rid.slot) == INVALID_SLOT) { */

    /* Slot is unallocated (or was freed): (re)do the allocation. */
    __really_do_ralloc(page, rid);

  } else {

    /* int ijk = rid.size;
       int lmn = *slot_length_ptr(page, rid.slot); */

    /* Slot already allocated: the stored length must match, unless the
       length field holds an out-of-band "type" value (>= PAGE_SIZE). */
    assert((rid.size == *slot_length_ptr(page, rid.slot)) ||
           (*slot_length_ptr(page, rid.slot) >= PAGE_SIZE));

  }

  pageWriteLSN(page, lsn);

  writeunlock(page->rwlatch);

  return rid;
}
|
|
|
|
|
2004-10-02 07:29:34 +00:00
|
|
|
/** Free a record's slot: mark it invalid and push its slot number onto
    the page's freelist (a free slot stores the next free slot number in
    its length field), then stamp the page's LSN. */
void slottedDeRalloc(Page * page, lsn_t lsn, recordid rid) {

  /* This mutates the slot table and the freelist, so it needs the
     write latch; the old code took only the read latch (readlock),
     which let concurrent readers observe a half-updated freelist. */
  writelock(page->rwlatch, 443);

  *slot_ptr(page, rid.slot) = INVALID_SLOT;
  *slot_length_ptr(page, rid.slot) = *freelist_ptr(page);
  *freelist_ptr(page) = rid.slot;
  /* *slot_length_ptr(page, rid.slot) = 0; */

  pageWriteLSN(page, lsn);

  unlock(page->rwlatch);
}
|
|
|
|
|
2004-10-19 21:16:37 +00:00
|
|
|
/** Copy record rid's bytes into buff without taking any latches — the
    caller must already hold an appropriate latch on the page.  The
    stored slot length must equal rid.size unless it holds an
    out-of-band "type" value (>= PAGE_SIZE). */
void slottedReadUnlocked(int xid, Page * page, recordid rid, byte *buff) {
  int slot_length;

  assert(page->id == rid.page);
  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (slot_length >= PAGE_SIZE));

  /* memcpy returns its destination argument, so the old
     "if(!memcpy(...)) { perror(); abort(); }" branch could never fire;
     check the destination pointer directly instead. */
  assert(buff != NULL);
  memcpy(buff, record_ptr(page, rid.slot), rid.size);
}
|
|
|
|
|
2004-07-30 01:28:39 +00:00
|
|
|
/*
|
|
|
|
This should trust the rid (since the caller needs to
|
|
|
|
override the size in special circumstances)
|
|
|
|
|
|
|
|
@todo If the rid size has been overridden, we should check to make
|
|
|
|
sure that this really is a special record.
|
|
|
|
*/
|
2004-08-17 01:46:17 +00:00
|
|
|
void slottedRead(int xid, Page * page, recordid rid, byte *buff) {
|
2004-07-30 01:28:39 +00:00
|
|
|
|
|
|
|
int slot_length;
|
|
|
|
readlock(page->rwlatch, 519);
|
|
|
|
|
|
|
|
assert(page->id == rid.page);
|
|
|
|
slot_length = *slot_length_ptr(page, rid.slot);
|
|
|
|
assert((rid.size == slot_length) || (slot_length >= PAGE_SIZE));
|
|
|
|
|
|
|
|
if(!memcpy(buff, record_ptr(page, rid.slot), rid.size)) {
|
|
|
|
perror("memcpy");
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
|
|
|
|
unlock(page->rwlatch);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
/** Copy data into record rid's bytes on the page.  Takes only the
    page's *read* latch: the slot table is not modified here, and
    record-level concurrency control is presumably handled by the
    caller — NOTE(review): confirm that intent.  The LSN parameter is
    currently unused (pageWriteLSN call is disabled). */
void slottedWrite(int xid, Page * page, lsn_t lsn, recordid rid, const byte *data) {
  int slot_length;

  readlock(page->rwlatch, 529);

  assert(rid.size < PAGE_SIZE);
  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (slot_length >= PAGE_SIZE));

  /* memcpy returns its destination argument, so the old
     "if(!memcpy(...)) { perror(); abort(); }" branch was dead code;
     check the source pointer directly instead. */
  assert(data != NULL);
  memcpy(record_ptr(page, rid.slot), data, rid.size);

  /*page->LSN = lsn;
    *lsn_ptr(page) = lsn * /
    pageWriteLSN(page); */
  unlock(page->rwlatch);

}
|
2004-10-19 21:16:37 +00:00
|
|
|
/** Like slottedWrite(), but takes no latches — the caller must already
    hold an appropriate latch on the page.  The LSN parameter is
    currently unused. */
void slottedWriteUnlocked(int xid, Page * page, lsn_t lsn, recordid rid, const byte *data) {
  int slot_length;

  assert(rid.size < PAGE_SIZE);
  assert(page->id == rid.page);

  slot_length = *slot_length_ptr(page, rid.slot);
  assert((rid.size == slot_length) || (slot_length >= PAGE_SIZE));

  /* memcpy returns its destination argument, so the old
     "if(!memcpy(...)) { perror(); abort(); }" branch could never fire;
     check the source pointer directly instead. */
  assert(data != NULL);
  memcpy(record_ptr(page, rid.slot), data, rid.size);
}
|
2004-07-30 01:28:39 +00:00
|
|
|
|
2004-08-21 00:03:30 +00:00
|
|
|
/*void slottedSetType(Page * p, int slot, int type) {
|
2004-07-30 01:28:39 +00:00
|
|
|
assert(type > PAGE_SIZE);
|
|
|
|
writelock(p->rwlatch, 686);
|
|
|
|
*slot_length_ptr(p, slot) = type;
|
|
|
|
unlock(p->rwlatch);
|
|
|
|
}
|
|
|
|
|
2004-08-17 01:46:17 +00:00
|
|
|
int slottedGetType(Page * p, int slot) {
|
2004-07-30 01:28:39 +00:00
|
|
|
int ret;
|
|
|
|
readlock(p->rwlatch, 693);
|
|
|
|
ret = *slot_length_ptr(p, slot);
|
|
|
|
unlock(p->rwlatch);
|
|
|
|
|
2004-08-21 00:03:30 +00:00
|
|
|
/ * getSlotType does the locking for us. * /
|
2004-07-30 01:28:39 +00:00
|
|
|
return ret > PAGE_SIZE ? ret : NORMAL_SLOT;
|
2004-08-21 00:03:30 +00:00
|
|
|
}*/
|