#include "logstore.h"
#include "datapage.h"
#include "regionAllocator.h"

#include <stasis/page.h>

static const int DATA_PAGE = USER_DEFINED_PAGE(1);
#define MAX_PAGE_COUNT 1000 // ~ 4MB

BEGIN_C_DECLS
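// The first int32 of every data page is a continuation flag (see the checks
// below and in the constructors): 0 marks the last (or only) page of a
// datapage, 2 marks the first page of a multi-page datapage, and 1 marks an
// interior continuation page.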
static void dataPageFsck(Page* p) {
  int32_t is_last_page = *stasis_page_int32_cptr_from_start(p, 0);
  assert(is_last_page == 0 || is_last_page == 1 || is_last_page == 2);
}
static void dataPageLoaded(Page* p) {
  dataPageFsck(p);
}
static void dataPageFlushed(Page* p) {
  *stasis_page_lsn_ptr(p) = p->LSN;
  dataPageFsck(p);
}
static int notSupported(int xid, Page* p) { return 0; }
END_C_DECLS
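
// DataPage manages its own intra-page layout, so nearly all of the slotted
// page callbacks below are left null; only the default block iterator
// implementations and the loaded/flushed hooks (header fsck, LSN stamping)
// are registered with Stasis.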
template <class TUPLE>
void DataPage<TUPLE>::register_stasis_page_impl() {
  static page_impl pi = {
    DATA_PAGE,
    1,
    0, //slottedRead,
    0, //slottedWrite,
    0, // readDone
    0, // writeDone
    0, //slottedGetType,
    0, //slottedSetType,
    0, //slottedGetLength,
    0, //slottedFirst,
    0, //slottedNext,
    0, //slottedLast,
    notSupported, // is block supported
    stasis_block_first_default_impl,
    stasis_block_next_default_impl,
    stasis_block_done_default_impl,
    0, //slottedFreespace,
    0, //slottedCompact,
    0, //slottedCompactSlotIDs,
    0, //slottedPreRalloc,
    0, //slottedPostRalloc,
    0, //slottedSpliceSlot,
    0, //slottedFree,
    0, //XXX page_impl_dereference_identity,
    dataPageLoaded, //dataPageLoaded,
    dataPageFlushed, //dataPageFlushed,
    0, //slottedCleanup
  };
  stasis_page_impl_register(pi);
}

template <class TUPLE>
DataPage<TUPLE>::DataPage(int xid, RegionAllocator* alloc, pageid_t pid) : // XXX Hack!! The read-only constructor signature is too close to the other's
  xid_(xid),
  page_count_(1), // will be opportunistically incremented as we scan the datapage.
  initial_page_count_(-1), // used by append.
  alloc_(alloc), // read-only, and we don't free data pages one at a time.
  first_page_(pid),
  write_offset_(-1)
{
  assert(pid != 0);
  Page* p = alloc_ ? alloc_->load_page(xid, first_page_) : loadPage(xid, first_page_);
  if (!(*is_another_page_ptr(p) == 0 || *is_another_page_ptr(p) == 2)) {
    printf("Page %lld is not the start of a datapage\n", (long long)first_page_); fflush(stdout);
    abort();
  }
  assert(*is_another_page_ptr(p) == 0 || *is_another_page_ptr(p) == 2); // would be 1 for a page in the middle of a datapage
  releasePage(p);
}

template <class TUPLE>
DataPage<TUPLE>::DataPage(int xid, pageid_t page_count, RegionAllocator* alloc) :
  xid_(xid),
  page_count_(1),
  initial_page_count_(page_count),
  alloc_(alloc),
  first_page_(alloc_->alloc_extent(xid_, page_count_)),
  write_offset_(0)
{
  DEBUG("Datapage page count: %lld pid = %lld\n", (long long int)initial_page_count_, (long long int)first_page_);
  assert(page_count_ >= 1);
  initialize();
}
template <class TUPLE>
void DataPage<TUPLE>::initialize() {
  initialize_page(first_page_);
}
template <class TUPLE>
void DataPage<TUPLE>::initialize_page(pageid_t pageid) {
  //load the first page
  Page* p;
#ifdef CHECK_FOR_SCRIBBLING
  p = alloc_ ? alloc_->load_page(xid_, pageid) : loadPage(xid_, pageid);
  if (*stasis_page_type_ptr(p) == DATA_PAGE) {
    printf("Collision on page %lld\n", (long long)pageid); fflush(stdout);
    assert(*stasis_page_type_ptr(p) != DATA_PAGE);
  }
#else
  p = loadUninitializedPage(xid_, pageid);
#endif

  DEBUG("\t\t\t\t\t\t->%lld\n", pageid);

  //initialize header
  p->pageType = DATA_PAGE;

  //clear page (arranges for null-padding)
  memset(p->memAddr, 0, PAGE_SIZE);

  //we're the last page for now.
  *is_another_page_ptr(p) = 0;

  //write 0 to first data size
  *length_at_offset_ptr(p, calc_chunk_from_offset(write_offset_).slot) = 0;

  //set the page dirty
  stasis_page_lsn_write(xid_, p, alloc_->get_lsn(xid_));

  releasePage(p);
}
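
// write_bytes writes at most one chunk (the bytes remaining in the current
// page) and returns the count actually written; callers loop until the
// buffer is consumed. If latch_p is non-NULL, the page that was written is
// handed back write-latched instead of being released.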
template <class TUPLE>
size_t DataPage<TUPLE>::write_bytes(const byte* buf, ssize_t remaining, Page** latch_p) {
  if (latch_p) { *latch_p = NULL; }
  recordid chunk = calc_chunk_from_offset(write_offset_);
  if (chunk.size > remaining) {
    chunk.size = remaining;
  }
  if (chunk.page >= first_page_ + page_count_) {
    chunk.size = 0; // no space (should not happen)
  } else {
    Page* p = alloc_ ? alloc_->load_page(xid_, chunk.page) : loadPage(xid_, chunk.page);
    assert(chunk.size);
    memcpy(data_at_offset_ptr(p, chunk.slot), buf, chunk.size);
    stasis_page_lsn_write(xid_, p, alloc_->get_lsn(xid_));
    if (latch_p && !*latch_p) {
      writelock(p->rwlatch, 0);
      *latch_p = p;
    } else {
      releasePage(p);
    }
    write_offset_ += chunk.size;
  }
  return chunk.size;
}
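
// read_bytes is the read-side analogue of write_bytes: it copies at most one
// chunk starting at offset, and returns 0 at end of file.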
template <class TUPLE>
size_t DataPage<TUPLE>::read_bytes(byte* buf, off_t offset, ssize_t remaining) {
  recordid chunk = calc_chunk_from_offset(offset);
  if (chunk.size > remaining) {
    chunk.size = remaining;
  }
  if (chunk.page >= first_page_ + page_count_) {
    chunk.size = 0; // eof
  } else {
    Page* p = alloc_ ? alloc_->load_page(xid_, chunk.page) : loadPage(xid_, chunk.page);
    assert(p->pageType == DATA_PAGE);
    // Opportunistically extend our view of the datapage: if this is the last
    // page we know about and it has a continuation, bump page_count_.
    if ((chunk.page + 1 == page_count_ + first_page_)
        && (*is_another_page_ptr(p))) {
      page_count_++;
    }
    memcpy(buf, data_at_offset_ptr(p, chunk.slot), chunk.size);
    releasePage(p);
  }
  return chunk.size;
}
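
// initialize_next_page extends the datapage by one page: it asks the region
// allocator to grow the extent, marks the current last page as continued,
// and formats the freshly allocated page.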
template <class TUPLE>
bool DataPage<TUPLE>::initialize_next_page() {
  recordid rid = calc_chunk_from_offset(write_offset_);
  assert(rid.slot == 0);
  DEBUG("\t\t%lld\n", (long long)rid.page);

  if (rid.page >= first_page_ + page_count_) {
    assert(rid.page == first_page_ + page_count_);
    if (alloc_->grow_extent(1)) {
      page_count_++;
    } else {
      return false; // The region is full
    }
  } else {
    abort();
  }

  Page* p = alloc_ ? alloc_->load_page(xid_, rid.page - 1) : loadPage(xid_, rid.page - 1);
  // Mark the previous page as continued: 2 if it is the first page of the
  // datapage, 1 if it is an interior page.
  *is_another_page_ptr(p) = (rid.page - 1 == first_page_) ? 2 : 1;
  stasis_page_lsn_write(xid_, p, alloc_->get_lsn(xid_));
  releasePage(p);

  initialize_page(rid.page);
  return true;
}
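
// write_data_and_latch loops over write_bytes until len bytes are written,
// extending the datapage when init_next is set. On success it returns the
// write-latched page holding the first byte (or the sentinel (Page*)1 when
// latch is false); it returns 0 on failure.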
template <class TUPLE>
Page* DataPage<TUPLE>::write_data_and_latch(const byte* buf, size_t len, bool init_next, bool latch) {
  bool first = true;
  Page* p = 0;
  while (1) {
    assert(len > 0);
//    if(latch) {
//      if(first) { assert(!p); } else { assert(p); }
//    } else {
//      assert(!p);
//    }
    size_t written;
    if (latch && first) {
      written = write_bytes(buf, len, &p);
    } else {
      written = write_bytes(buf, len);
    }
    if (written == 0) {
      assert(!p);
      return 0; // fail
    }
    if (written == len) {
      if (latch) {
        return p;
      } else {
//        assert(!p);
        return (Page*)1;
      }
    }
    if (len > PAGE_SIZE && !first) {
      assert(written > 4000);
    }
    buf += written;
    len -= written;
    if (init_next) {
      if (!initialize_next_page()) {
        if (p) {
//          assert(latch);
          unlock(p->rwlatch);
          releasePage(p);
        }
        return 0; // fail
      }
    }
    first = false;
  }
}

template <class TUPLE>
bool DataPage<TUPLE>::write_data(const byte* buf, size_t len, bool init_next) {
  return 0 != write_data_and_latch(buf, len, init_next, false);
}
template <class TUPLE>
bool DataPage<TUPLE>::read_data(byte* buf, off_t offset, size_t len) {
  while (1) {
    assert(len > 0);
    size_t read_count = read_bytes(buf, offset, len);
    if (read_count == 0) {
      return false; // fail
    }
    if (read_count == len) {
      return true; // success
    }
    buf += read_count;
    offset += read_count;
    len -= read_count;
  }
}
template <class TUPLE>
bool DataPage<TUPLE>::append(TUPLE const* dat)
{
  // Don't append a record to an already-full datapage. The record we append
  // now may push us over the page limit, but that's OK.
  if (write_offset_ > (initial_page_count_ * PAGE_SIZE)) {
    DEBUG("offset %lld closing datapage\n", write_offset_);
    return false;
  }
  DEBUG("offset %lld continuing datapage\n", write_offset_);

  byte* buf = dat->to_bytes(); // TODO could be more efficient; this does a malloc and memcpy. The alternative couples us more strongly to datapage, but simplifies datapage.
  len_t dat_len = dat->byte_length();

  // The trailing arguments are defaulted here (presumably init_next and latch
  // default to true in datapage.h's declaration).
  Page* p = write_data_and_latch((const byte*)&dat_len, sizeof(dat_len));
  bool succ = false;
  if (p) {
    succ = write_data(buf, dat_len);
    unlock(p->rwlatch);
    releasePage(p);
  }
  free(buf);
  return succ;
}
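
// recordRead scans the datapage in order; tuples are stored sorted by key,
// so the scan stops early once it sees a key greater than the target.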
template <class TUPLE>
bool DataPage<TUPLE>::recordRead(const typename TUPLE::key_t key, size_t keySize, TUPLE** buf)
{
  iterator itr(this, NULL);

  int match = -1;
  while ((*buf = itr.getnext()) != 0)
  {
    match = TUPLE::compare((*buf)->key(), (*buf)->keylen(), key, keySize);

    if (match < 0) //keep searching
    {
      datatuple::freetuple(*buf);
      *buf = 0;
    }
    else if (match == 0) //found
    {
      return true;
    }
    else // match > 0; the key does not exist in this datapage
    {
      datatuple::freetuple(*buf);
      *buf = 0;
      break;
    }
  }
  return false;
}
///////////////////////////////////////////////////////////////
//RECORD ITERATOR
///////////////////////////////////////////////////////////////
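// getnext returns the next tuple (the caller releases it with
// datatuple::freetuple, as recordRead does above) or NULL at the end of the
// data; a zero length header marks the end of the record stream.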
template <class TUPLE>
TUPLE* DataPage<TUPLE>::iterator::getnext()
{
  len_t len;
  bool succ;
  if (dp == NULL) { return NULL; }

  // XXX hack: read latch the page that the record will live on.
  // This should be handled by a read_data_in_latch function, or something...
  Page* p = loadPage(dp->xid_, dp->calc_chunk_from_offset(read_offset_).page);
  readlock(p->rwlatch, 0);
  succ = dp->read_data((byte*)&len, read_offset_, sizeof(len));
  if ((!succ) || (len == 0)) {
    unlock(p->rwlatch);
    releasePage(p);
    return NULL;
  }
  read_offset_ += sizeof(len);

  byte* buf = (byte*)malloc(len);
  succ = dp->read_data(buf, read_offset_, len);

  // release hacky latch
  unlock(p->rwlatch);
  releasePage(p);

  if (!succ) { read_offset_ -= sizeof(len); free(buf); return NULL; }

  read_offset_ += len;

  TUPLE* ret = TUPLE::from_bytes(buf);

  free(buf);

  return ret;
}

template class DataPage<datatuple>;
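
// A minimal usage sketch (hypothetical driver code; assumes an open Stasis
// transaction xid, a RegionAllocator alloc, and helper functions that are
// not part of this file):
//
//   DataPage<datatuple> dp(xid, 2, alloc);               // writable, ~2 pages
//   while (more_tuples() && dp.append(next_tuple())) {}  // stop when full
//   DataPage<datatuple> ro(xid, alloc, first_pid);       // read-only reopen
//   DataPage<datatuple>::iterator it(&ro, NULL);
//   while (datatuple* t = it.getnext()) { datatuple::freetuple(t); }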