Initial ROSE check in.

Sears Russell 2007-10-15 17:46:44 +00:00
parent cb7cd6bc88
commit cd5ec5f70c
13 changed files with 3001 additions and 2 deletions

benchmarks/Makefile.am

@@ -1,11 +1,14 @@
LDADD=$(top_builddir)/src/stasis/libstasis.la \
$(top_builddir)/src/libdfa/librw.la
rose_SOURCES=rose.cpp
if BUILD_BENCHMARKS
noinst_PROGRAMS=lhtableThreaded naiveHash logicalHash readLogicalHash naiveMultiThreaded logicalMultThreaded rawSet \
arrayListSet logicalMultiReaders linearHashNTA linkedListNTA pageOrientedListNTA \
-linearHashNTAThreaded linearHashNTAMultiReader linearHashNTAWriteRequests transitiveClosure zeroCopy sequentialThroughput
+linearHashNTAThreaded linearHashNTAMultiReader linearHashNTAWriteRequests transitiveClosure zeroCopy sequentialThroughput rose
endif
AM_CFLAGS=${GLOBAL_CFLAGS}
AM_CXXFLAGS=${GLOBAL_CXXFLAGS} -I ${top_builddir}/src
SUBDIRS=berkeleyDB

benchmarks/rose.cpp (new file, 1495 lines)

File diff suppressed because it is too large.

benchmarks/rose.sh (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/bin/sh
ROSE=./rose
$ROSE -i 10000000 -t && $ROSE -i 10000000 && $ROSE -i 100000 -t && $ROSE -i 100000 && $ROSE -i 1000 -t && $ROSE -i 1000 && $ROSE -i 10 -t && $ROSE -i 10 && $ROSE -i 1 -t && $ROSE -i 1 && $ROSE -i 0 -t && $ROSE -i 0 && $ROSE -l -i 10000000 -t && $ROSE -l -i 10000000 && $ROSE -l -i 100000 -t && $ROSE -l -i 100000 && $ROSE -l -i 1000 -t && $ROSE -l -i 1000 && $ROSE -l -i 10 -t && $ROSE -l -i 10 && $ROSE -l -i 1 -t && $ROSE -l -i 1 && $ROSE -l -i 0 -t && $ROSE -l -i 0 && $ROSE -n 5 -i 10000000 -t && $ROSE -n 5 -i 10000000 && $ROSE -n 5 -i 100000 -t && $ROSE -n 5 -i 100000 && $ROSE -n 5 -i 1000 -t && $ROSE -n 5 -i 1000 && $ROSE -n 5 -i 10 -t && $ROSE -n 5 -i 10 && $ROSE -n 5 -i 1 -t && $ROSE -n 5 -i 1 && $ROSE -n 5 -i 0 -t && $ROSE -n 5 -i 0 && $ROSE -l -n 5 -i 10000000 -t && $ROSE -l -n 5 -i 10000000 && $ROSE -l -n 5 -i 100000 -t && $ROSE -l -n 5 -i 100000 && $ROSE -l -n 5 -i 1000 -t && $ROSE -l -n 5 -i 1000 && $ROSE -l -n 5 -i 10 -t && $ROSE -l -n 5 -i 10 && $ROSE -l -n 5 -i 1 -t && $ROSE -l -n 5 -i 1 && $ROSE -l -n 5 -i 0 -t && $ROSE -l -n 5 -i 0

compression.h (new file, 77 lines)

@@ -0,0 +1,77 @@
#include <limits.h>
#ifndef _ROSE_COMPRESSION_COMPRESSION_H__
#define _ROSE_COMPRESSION_COMPRESSION_H__
namespace rose {
typedef int8_t record_size_t;
typedef uint16_t byte_off_t;
typedef uint32_t slot_index_t;
typedef uint8_t plugin_id_t;
typedef uint8_t column_number_t;
typedef uint16_t column_offset_t;
static const record_size_t VARIABLE_SIZE = CHAR_MAX;
static const slot_index_t NOSPACE = UINT_MAX;
static const slot_index_t EXCEPTIONAL = UINT_MAX-1;
static const slot_index_t MAX_INDEX = UINT_MAX-2;
/**
This function computes a page type (an integer stored in the page header)
so that Stasis can dispatch calls to the appropriate page implementation.
Most page types choose a single constant, but pstar's page layout varies
across different template instantiations. In particular, the page layout
depends on sizeof(TYPE) and upon COMPRESSOR. Finally, pstar and the
compressors behave differently depending on whether TYPE is signed
or unsigned. (Non-integer types are currently not supported.)
Right now, everything happens to be a power of two, and the page
type is of this form:
BASE_PAGEID + 00PCSII (base 2)
P stores the page format PAGE_FORMAT_ID
C stores the compressor PLUGIN_ID.
S is 1 iff the type is signed
II is 00, 01, 10 or 11, depending on sizeof(TYPE)
Although the on-disk representation is bigger, Stasis tries to keep page
types within the range 0 - 255.
*/
template <class PAGEFORMAT, class COMPRESSOR, class TYPE>
plugin_id_t plugin_id() {
/* type_idx maps from sizeof(TYPE) to a portion of a page type:
(u)int8_t -> 0
(u)int16_t -> 1
(u)int32_t -> 2
(u)int64_t -> 3
*/
// Number of bytes in type ---> 1 2 4 8
static const int type_idx[] = { -1, 0, 1, -1, 2, -1, -1, -1, 3 };
static const int idx_count = 4;
static const TYPE is_signed = 0 - 1;
// assert(sizeof(TYPE) <= 8 && type_idx[sizeof(TYPE)] >= 0);
// XXX first '2' hardcodes the number of COMPRESSOR implementations...
plugin_id_t ret = USER_DEFINED_PAGE(0)
// II S C
+ idx_count * 2 * 2 * PAGEFORMAT::PAGE_FORMAT_ID
+ idx_count * 2 * COMPRESSOR::PLUGIN_ID
+ idx_count * (is_signed < 0)
+ type_idx[sizeof(TYPE)];
return ret;
}
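// Worked example (derived from the constants in this check-in): for
// Pstar<For<int32_t>, int32_t>, PAGE_FORMAT_ID is 0, For's PLUGIN_ID is 0,
// int32_t is signed, and type_idx[sizeof(int32_t)] == 2, so plugin_id()
// returns USER_DEFINED_PAGE(0) + 16*0 + 8*0 + 4*1 + 2
// = USER_DEFINED_PAGE(0) + 6.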
} // namespace rose
#endif // _ROSE_COMPRESSION_COMPRESSION_H__

for-impl.h (new file, 87 lines)

@@ -0,0 +1,87 @@
#ifndef _ROSE_COMPRESSION_FOR_IMPL_H__
#define _ROSE_COMPRESSION_FOR_IMPL_H__
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
#include <assert.h>
#include "for.h"
namespace rose {
template <class TYPE>
inline void
For<TYPE>::offset(TYPE o) {
assert(*numdeltas_ptr() == 0);
*base_ptr() = o;
}
/**
Store a new value as a delta from the page's base offset, then update
numdeltas_ptr so that we remember that we stored the value.
*/
template <class TYPE>
inline slot_index_t
For<TYPE>::append(int xid, const TYPE dat,
byte_off_t* except, byte* exceptions, //char *exceptional,
int *free_bytes) {
// Can dat be represented as a delta from the page's base value?
// XXX this can overflow if dat and / or offset are 64 bit...
int64_t delta = (int64_t)dat - (int64_t)offset();
if(delta > DELTA_MAX || delta < 0) {
// Store dat's location as a delta
*next_delta_ptr() = *except - PAGE_SIZE;
// Store dat as an exception
*(((TYPE*)(&exceptions[*except]))-1) = dat;
// Allocate the delta and the exception (if possible)
*free_bytes -= sizeof(TYPE) + sizeof(delta_t);
int incr = *free_bytes >= 0;
*numdeltas_ptr() += incr;
*except -= incr * sizeof(TYPE);
/* This does the same thing as the last few lines, but with a branch. It's
marginally slower:
*next_delta_ptr() = *except - PAGE_SIZE;
*free_bytes -= sizeof(TYPE) + sizeof(delta_t);
if(*free_bytes >= 0) {
(*numdeltas_ptr())++;
*except -= sizeof(TYPE);
*(TYPE*)(&exceptions[*except]) = dat;
} */
} else {
// Store the delta
*next_delta_ptr() = (delta_t) delta;
// Allocate space for it, if possible
*free_bytes -= sizeof(delta_t);
*numdeltas_ptr() += *free_bytes >= 0;
}
return *numdeltas_ptr() - 1;
}
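// Worked example (using the 16-bit delta_t from for.h, DELTA_MAX = SHRT_MAX):
// after offset(1000), appending 1000 and 1005 stores the in-range deltas
// 0 and 5 inline. Appending 40000 fails the range check
// (40000 - 1000 > DELTA_MAX), so 40000 is prepended to the exceptions
// region and its delta slot receives the exception's negative page offset.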
template <class TYPE>
inline TYPE *
For<TYPE>::recordRead(int xid, slot_index_t slot, byte *exceptions,
TYPE * scratch) {
if (slot >= *numdeltas_ptr()) {
return 0;
}
delta_t d = *nth_delta_ptr(slot);
if (d >= 0) {
*scratch = d + *base_ptr();
return scratch;
} else {
*scratch = *(TYPE*)(exceptions + d + PAGE_SIZE - sizeof(TYPE));
return scratch;
}
}
} // namespace rose
#endif // _ROSE_COMPRESSION_FOR_IMPL_H__

for.h (new file, 158 lines)

@@ -0,0 +1,158 @@
#ifndef _ROSE_COMPRESSION_FOR_H__
#define _ROSE_COMPRESSION_FOR_H__
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
/**
@file Implementation of Frame of Reference compression
This file implements a COMPRESSOR plugin that handles compression
for a column of data in a page. Rather than hardcoding a
particular page layout, these plugins work with two ranges of
memory that must be contiguous. The first range contains the
compressed data. A pointer to this region is stored in the mem_
member of this class. The second region is shared amongst multiple
compressor implementations and is passed into methods via the
"exceptions" parameter. A second parameter, "except" provides the
offset of the first byte of exceptions that is in use. If
necessary, the compressor implementation may store data as an
exception, by prepending it to the exceptions array, and
decrementing "except".
A third parameter, "free_bytes" is used to manage byte allocation
out of some global (per page) pool. If free_bytes becomes
negative, the page is out of space, and all further allocation
attempts will fail. Compressor plugins modify it as more data is
inserted into the page.
Finally, the compressor may define a volatile region of bytes after
the end of the compressed data. This region is used as scratch
space, and will not be written to disk. However, it <i>is</i>
deducted from the total number of free bytes on the page, wasting a
small amount of storage on disk. The max_overrun() method returns
the size of this extra scratch space buffer.
*/
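/*
Hypothetical usage sketch (not part of this check-in; pstar normally
drives these calls). It illustrates the append()/recordRead() protocol
described above for a column region at the start of a pinned page p:

For<int32_t> col(xid, p->memAddr); // initialize a new FOR region
col.offset(1000); // set the frame-of-reference base
byte_off_t except = USABLE_SIZE_OF_PAGE; // exceptions grow down from here
int free_bytes = USABLE_SIZE_OF_PAGE - col.bytes_used() - col.max_overrun();
slot_index_t s = col.append(xid, (int32_t)1005, &except, p->memAddr,
&free_bytes);
int32_t scratch;
int32_t *v = col.recordRead(xid, s, p->memAddr, &scratch); // *v == 1005
*/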
#include <limits.h>
#include "pstar.h"
namespace rose {
template <class TYPE>
class For {
public:
static const int PLUGIN_ID = 0;
/**
Set the page offset. For frame of reference, this is used to
calculate deltas.
*/
inline void offset(TYPE o);
/**
The size of the scratch space reserved at the end of the page for
speculative execution.
*/
inline size_t max_overrun() { return sizeof(delta_t) + sizeof(TYPE); }
/**
Append a new value to a compressed portion of a page. This
function is meant to be called by pstar, not by end users.
@param xid the transaction that is appending the value (currently unused)
@param dat contains the value to be appended to the end of the page.
@param except the offset of the first exceptional value on the page. This
should initially be set to the end of the exceptional array;
append() will modify it as needed.
@param exceptions a pointer to the beginning of the exceptions region.
@param free_bytes The number of free bytes on the page. This number will be
decremented (or incremented) by append to reflect changes in the
number of bytes in use.
@return The slot index of the newly appended value, or an undefined value
if the page is too full to accommodate it (that is, free_bytes is
negative). Implementations may set free_bytes to a negative value if
an implementation-defined limit prevents them from accommodating more
data, even if there are free bytes on the page.
*/
inline slot_index_t append(int xid, const TYPE dat,
byte_off_t * except, byte * exceptions,
int * free_bytes);
/**
Read a compressed value. This function is meant to be called by
pstar, not by end users.
@param xid Tracked for locking. Currently unused.
@param slot The index number of the slot that should be read.
@param exceptions A byte array that contains the exceptional values.
@param buf Storage space for the value to be read. This function will
return this pointer after successfully reading the value.
The caller manages the memory passed via buf.
@return NULL if the slot is off the end of the page, or buf if the
record exists, and has been read.
@see Pstar::recordRead() and Multicolumn::recordRead()
*/
inline TYPE *recordRead(int xid, slot_index_t slot, byte *exceptions,
TYPE * buf);
/**
This constructor initializes a new FOR region.
@param xid the transaction that created the new region.
*/
For(int xid, void * mem): mem_(mem) {
*base_ptr() = -1;
*numdeltas_ptr() = 0;
};
For(void * mem): mem_(mem) { }
For() : mem_(0) {}
/**
@return the length of the FOR region, in bytes
*/
inline byte_off_t bytes_used() {
return ((intptr_t)(last_delta_ptr()+1)) - (intptr_t)mem_;
}
inline void mem(byte * mem) { mem_ = mem; }
inline void init_mem(byte * mem) {
mem_=mem;
*base_ptr() = -1;
*numdeltas_ptr() = 0;
}
private:
/*typedef int8_t delta_t;
static const delta_t DELTA_MAX = CHAR_MAX;
static const delta_t DELTA_MIN = CHAR_MIN;*/
typedef int16_t delta_t;
static const delta_t DELTA_MAX = SHRT_MAX;
static const delta_t DELTA_MIN = SHRT_MIN;
/*typedef int32_t delta_t;
static const delta_t DELTA_MAX = INT_MAX;
static const delta_t DELTA_MIN = INT_MIN;*/
inline TYPE offset() { return *base_ptr(); }
inline TYPE* base_ptr() { return reinterpret_cast<TYPE*>(mem_); }
inline slot_index_t* numdeltas_ptr() {
return reinterpret_cast<slot_index_t*>(base_ptr()+1);
}
inline delta_t * nth_delta_ptr(slot_index_t n) {
return reinterpret_cast<delta_t*>(numdeltas_ptr()+1) + n;
}
inline delta_t * last_delta_ptr() {
return nth_delta_ptr(*numdeltas_ptr()-1);
}
inline delta_t * next_delta_ptr() {
return nth_delta_ptr(*numdeltas_ptr());
}
void * mem_;
};
} // namespace rose
#endif // _ROSE_COMPRESSION_FOR_H__

multicolumn-impl.h (new file, 266 lines)

@@ -0,0 +1,266 @@
#ifndef _ROSE_COMPRESSION_MULTICOLUMN_IMPL_H__
#define _ROSE_COMPRESSION_MULTICOLUMN_IMPL_H__
#include "multicolumn.h"
namespace rose {
/**
Initialize a new multicolumn page
*/
template <class TUPLE>
Multicolumn<TUPLE>::Multicolumn(int xid, Page *p, column_number_t column_count,
plugin_id_t * plugins) :
p_(p),
columns_(new byte*[column_count]),
first_exception_byte_(USABLE_SIZE_OF_PAGE),
exceptions_(new byte[USABLE_SIZE_OF_PAGE]),
dispatcher_(column_count),
unpacked_(1)
{
*column_count_ptr() = column_count;
bytes_left_ = first_header_byte_ptr()- p->memAddr;
for(int i = 0; i < column_count; i++) {
*column_plugin_id_ptr(i) = plugins[i];
columns_[i] = new byte[USABLE_SIZE_OF_PAGE];
dispatcher_.set_plugin(columns_[i],i,plugins[i]);
dispatcher_.init_mem(columns_[i],i);
bytes_left_ -= dispatcher_.bytes_used(i);
}
*stasis_page_type_ptr(p) = plugin_id();
p->impl = this;
}
/**
XXX this eagerly unpacks the page at load; that's a waste of
processor time and RAM, as read-only pages don't need to be
unpacked.
*/
template<class TUPLE>
Multicolumn<TUPLE>::Multicolumn(Page * p) :
p_(p),
columns_(new byte*[*column_count_ptr()]),
first_exception_byte_(USABLE_SIZE_OF_PAGE - *exceptions_len_ptr()),
exceptions_(p_->memAddr + *exceptions_offset_ptr()),
dispatcher_(*column_count_ptr()),
unpacked_(0) {
byte_off_t first_free = 0;
for(int i = 0; i < *column_count_ptr(); i++) {
columns_[i] = p_->memAddr + *column_offset_ptr(i);
dispatcher_.set_plugin(columns_[i],i, *column_plugin_id_ptr(i));
byte_off_t column_length = dispatcher_.bytes_used(i);
first_free = *column_offset_ptr(i) + column_length;
}
assert(first_free <= *exceptions_offset_ptr());
assert(first_exception_byte_ <= USABLE_SIZE_OF_PAGE);
bytes_left_ = *exceptions_offset_ptr() - first_free;
assert(*stasis_page_type_ptr(p) == Multicolumn<TUPLE>::plugin_id());
}
template <class TUPLE>
void Multicolumn<TUPLE>::pack() {
byte_off_t first_free = 0;
byte_off_t last_free = (intptr_t)(first_header_byte_ptr() - p_->memAddr);
if(unpacked_) {
*exceptions_len_ptr() = USABLE_SIZE_OF_PAGE - first_exception_byte_;
last_free -= *exceptions_len_ptr();
*exceptions_offset_ptr() = last_free;
memcpy(&(p_->memAddr[*exceptions_offset_ptr()]),
exceptions_ + first_exception_byte_, *exceptions_len_ptr());
for(int i = 0; i < *column_count_ptr(); i++) {
*column_offset_ptr(i) = first_free;
byte_off_t bytes_used = dispatcher_.bytes_used(i);
memcpy(column_base_ptr(i), columns_[i], bytes_used);
first_free += bytes_used;
assert(first_free <= last_free);
delete [] columns_[i];
columns_[i] = column_base_ptr(i);
dispatcher_.mem(columns_[i],i); //compressor(i))->mem(columns_[i]);
}
delete [] exceptions_;
exceptions_ = p_->memAddr + *exceptions_offset_ptr();
unpacked_ = 0;
}
}
template <class TUPLE>
Multicolumn<TUPLE>::~Multicolumn() {
// pack() already contains the write-back logic; reuse it rather than
// duplicating it here. It is a no-op if the page was never unpacked.
pack();
delete [] columns_;
}
/// Begin performance-critical code -------------------------------------------
/**
Append a record to the page. This function is complicated by
the fact that each column was produced by a potentially
different template instantiation. Rather than hardcode
compressor implementations, or fall back on virtual methods,
this function delegates compressor calls to PluginDispatcher.
Pstar<> (and potential future implementations of multicolumn)
benefit from this scheme, as they can hardcode compressors at
compile time, allowing the correct append method to be inlined
rather than invoked via a virtual method.
*/
template <class TUPLE>
inline slot_index_t Multicolumn<TUPLE>::append(int xid,
TUPLE const & dat) {
slot_index_t ret = NOSPACE;
column_number_t i = 0;
const column_number_t cols = dat.column_count();
do {
slot_index_t newret = dispatcher_.recordAppend(xid, i, dat.get(i),
&first_exception_byte_,
exceptions_, &bytes_left_);
//assert(ret == NOSPACE || newret == NOSPACE || newret == ret);
ret = newret;
i++;
} while(i < cols);
return bytes_left_ < 0 ? NOSPACE : ret;
}
/**
Read a record (tuple) from the page.
@see append for a discussion of the implementation and
associated design tradeoffs.
*/
template <class TUPLE>
inline TUPLE* Multicolumn<TUPLE>::recordRead(int xid, slot_index_t slot,
TUPLE *buf) {
column_number_t i = 0;
column_number_t cols = buf->column_count();
do {
void * ret = dispatcher_.recordRead(xid,columns_[i],i,slot,exceptions_,
buf->get(i));
if(!ret) {
return 0;
}
i++;
} while(i < cols);
return buf;
}
/// End performance-critical code ---------------------------------------------
/// Stuff below this line interfaces with Stasis' buffer manager --------------
/**
Basic page_impl for multicolumn pages
@see stasis/page.h and pstar-impl.h
*/
static const page_impl multicolumn_impl = {
-1,
0, // multicolumnRead,
0, // multicolumnWrite,
0, // multicolumnReadDone,
0, // multicolumnWriteDone,
0, // multicolumnGetType,
0, // multicolumnSetType,
0, // multicolumnGetLength,
0, // multicolumnFirst,
0, // multicolumnNext,
0, // multicolumnIsBlockSupported,
0, // multicolumnBlockFirst,
0, // multicolumnBlockNext,
0, // multicolumnBlockDone,
0, // multicolumnFreespace,
0, // multicolumnCompact,
0, // multicolumnPreRalloc,
0, // multicolumnPostRalloc,
0, // multicolumnFree,
0, // dereference_identity,
0, // multicolumnLoaded,
0, // multicolumnFlushed
0, // multicolumnCleanup
};
// XXX implement plugin_id(). Currently, it treats all instantiations of the
// same TUPLE template interchangeably; this will break for binaries that
// manipulate more than one type of tuple.
template <class TUPLE>
inline plugin_id_t
Multicolumn<TUPLE>::plugin_id() {
return USER_DEFINED_PAGE(0) + 32 + TUPLE::TUPLE_ID;
}
template <class TUPLE>
void multicolumnLoaded(Page *p) {
p->LSN = *stasis_page_lsn_ptr(p);
assert(*stasis_page_type_ptr(p) == Multicolumn<TUPLE>::plugin_id());
p->impl = new Multicolumn<TUPLE>(p);
}
template <class TUPLE>
static void multicolumnFlushed(Page *p) {
*stasis_page_lsn_ptr(p) = p->LSN;
((Multicolumn<TUPLE>*)(p->impl))->pack();
}
template <class TUPLE>
static void multicolumnCleanup(Page *p) {
delete (Multicolumn<TUPLE>*)p->impl;
p->impl = 0;
}
template <class TUPLE>
page_impl Multicolumn<TUPLE>::impl() {
page_impl ret = multicolumn_impl;
ret.page_type = Multicolumn<TUPLE>::plugin_id();
ret.pageLoaded = multicolumnLoaded<TUPLE>;
ret.pageFlushed = multicolumnFlushed<TUPLE>;
ret.pageCleanup = multicolumnCleanup<TUPLE>;
return ret;
}
} // namespace rose
#endif // _ROSE_COMPRESSION_MULTICOLUMN_IMPL_H__

multicolumn.h (new file, 167 lines)

@@ -0,0 +1,167 @@
#ifndef _ROSE_COMPRESSION_MULTICOLUMN_H__
#define _ROSE_COMPRESSION_MULTICOLUMN_H__
#include <limits.h>
#include <stasis/page.h>
#include <stasis/constants.h>
#include "pstar.h" // for typedefs + consts (XXX add new header?)
#include "tuple.h" // XXX rename tuple.hx
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
/**
@file Page implementation for multi-column, compressed data
STRUCTURE OF A MULTICOLUMN PAGE
<pre>
+----------------------------------------------------------------------+
| col #0 compressed data (opaque) | col #1 compressed data (opaque) |
+-----+---------------------------+-----+------------------------------|
| ... | col #N compressed data (opaque) | |
+-----+---------------------------------+ |
| Free space |
| |
| |
| +----------------------------------------+
| | Exceptions: |
+-----------------------------+ Includes data from multiple cols |
| |
| Exception data is managed (bytes are copied in and out of this |
| region) by the column implementations. Multicolumn mediates between |
| the columns, by recording the length and offset of this region. |
| |
| +---------------+---------------+
| ... | exception # 1 | exception # 0 |
+-----------------------+--------------------+----+--------------------+
| first header byte -> | col #N off, plugin | .. | col #1 off, plugin |
+--------------------+--+-------------+------+----+----+-----------+---+
| col #0 off, plugin | exceptions len | exceptions off | # of cols | ? |
+--------------------+----------------+----------------+-----------+---+
</pre>
Notes:
The 'exceptions' portion of the page grows down from
first_header_byte, while the column data portion grows up from byte
zero. This was an arbitrary decision, and it complicated the
implementation somewhat.
Functions whose names end in "_ptr" return pointers to bytes in the
page. That memory is persistent and will eventually be written
back to the page file.
*/
namespace rose {
template <class TUPLE>
/**
* A "pageLoaded()" callback function for Stasis' buffer manager.
*/
void multicolumnLoaded(Page *p);
template <class TUPLE> class Multicolumn {
public:
static page_impl impl();
static const plugin_id_t PAGE_FORMAT_ID = 1;
Multicolumn(int xid, Page *p, column_number_t column_count,
plugin_id_t * plugins);
~Multicolumn();
/**
Return the compressor used for a column. The nature of the
mapping between table region and compressor instance is
implementation defined, but there will never be more than one
compressor per column, per page.
@param col The column whose compressor should be returned.
@return A pointer to a compressor. This pointer is guaranteed to
be valid until the next call on this Multicolumn object. After
that, the pointer returned here is invalid.
*/
void* compressor(column_number_t col) {
return dispatcher_.compressor(col);
}
inline slot_index_t append(int xid, TUPLE const & dat);
inline TUPLE * recordRead(int xid, slot_index_t slot, TUPLE * buf);
inline void pack();
private:
typedef struct column_header {
byte_off_t off;
plugin_id_t plugin_id;
} column_header;
/**
Load an existing multicolumn Page
*/
Multicolumn(Page * p);
/**
The following functions perform pointer arithmetic. This code is
performance critical. These short, inlined functions mostly
perform simple arithmetic expressions involving constants. g++'s
optimizer seems to combine and simplify these expressions for us.
See the page layout diagram at the top of this file for an
explanation of where these pointers are stored.
*/
inline column_number_t * column_count_ptr() {
return reinterpret_cast<column_number_t*>(p_->memAddr+USABLE_SIZE_OF_PAGE)-1;
}
inline byte_off_t * exceptions_offset_ptr() {
return reinterpret_cast<byte_off_t*>(column_count_ptr())-1;
}
inline byte_off_t * exceptions_len_ptr() {
return exceptions_offset_ptr()-1;
}
inline column_header * column_header_ptr(column_number_t column_number) {
return reinterpret_cast<column_header*>(exceptions_len_ptr())-(1+column_number);
}
inline byte_off_t * column_offset_ptr(column_number_t column_number) {
return &(column_header_ptr(column_number)->off);
}
/**
This stores the plugin_id associated with this page's compressor.
@see rose::plugin_id()
*/
inline plugin_id_t * column_plugin_id_ptr(column_number_t column_number) {
return &(column_header_ptr(column_number)->plugin_id);
}
/**
The first byte that contains data for this column.
The length of the column data can be determined by calling
COMPRESSOR's bytes_used() member function. (PluginDispatcher
can handle this).
*/
inline byte * column_base_ptr(column_number_t column_number) {
return *column_offset_ptr(column_number) + p_->memAddr;
}
inline byte * first_header_byte_ptr() {
return reinterpret_cast<byte*>(column_header_ptr((*column_count_ptr())-1));
}
static inline plugin_id_t plugin_id();
Page * p_;
byte ** columns_;
byte_off_t first_exception_byte_;
byte * exceptions_;
PluginDispatcher dispatcher_;
int bytes_left_;
int unpacked_;
friend void multicolumnLoaded<TUPLE>(Page *p);
};
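/*
Hypothetical usage sketch (not part of this check-in): format a pinned
page p with two int64_t columns, FOR-compressed and RLE-compressed, then
append a tuple. The plugin ids are computed with rose::plugin_id().

plugin_id_t plugins[] = {
plugin_id<Multicolumn<Tuple<int64_t> >, For<int64_t>, int64_t>(),
plugin_id<Multicolumn<Tuple<int64_t> >, Rle<int64_t>, int64_t>()
};
Multicolumn<Tuple<int64_t> > mc(xid, p, 2, plugins);
Tuple<int64_t> t(2);
int64_t v0 = 42, v1 = 43;
t.set(0, &v0);
t.set(1, &v1);
slot_index_t slot = mc.append(xid, t); // NOSPACE once the page fills
*/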
} // namespace rose
#endif // _ROSE_COMPRESSION_MULTICOLUMN_H__

pstar-impl.h (new file, 110 lines)

@@ -0,0 +1,110 @@
#ifndef _ROSE_COMPRESSION_PSTAR_IMPL_H__
#define _ROSE_COMPRESSION_PSTAR_IMPL_H__
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
#include <string.h>
#include <assert.h>
#include "pstar.h"
#include "for.h"
namespace rose {
/**
Appends a value to a page managed by pstar. For now, most of the
"real work" is handled by the compression algorithm.
This function simply checks the return value from the plugin. If
the value should be stored as an exception, then it is prepended to
the list of exceptions at the end of the page. The compressed data
is kept at the beginning of the page.
*/
template <class COMPRESSOR, class TYPE>
slot_index_t
Pstar<COMPRESSOR, TYPE>::append(int xid, const TYPE dat) {
slot_index_t ret = plug_.append(xid, dat, freespace_ptr(), p_->memAddr,
&free_bytes_);
return free_bytes_ >= 0 ? ret : NOSPACE;
}
// The rest of this file interfaces with Stasis -------------------------
/**
Implementation of the Stasis pageLoaded() callback.
@see stasis/page.h
*/
template <class COMPRESSOR, class TYPE>
static void pStarLoaded(Page * p) {
p->LSN = *stasis_page_lsn_ptr(p);
p->impl = new Pstar<COMPRESSOR, TYPE>(p);
}
/**
Implementation of the Stasis pageFlushed() callback.
*/
template <class COMPRESSOR, class TYPE>
static void pStarFlushed(Page * p) {
*stasis_page_lsn_ptr(p) = p->LSN;
}
template <class COMPRESSOR, class TYPE>
static void pStarCleanup(Page * p) {
delete (Pstar<COMPRESSOR, TYPE>*)p->impl;
}
/**
Basic page_impl for pstar
@see stasis/page.h
*/
static const page_impl pstar_impl = {
-1,
0, // pStarRead,
0, // pStarWrite,
0, // pStarReadDone,
0, // pStarWriteDone,
0, // pStarGetType,
0, // pStarSetType,
0, // pStarGetLength,
0, // pStarFirst,
0, // pStarNext,
0, // pStarIsBlockSupported,
0, // pStarBlockFirst,
0, // pStarBlockNext,
0, // pStarBlockDone,
0, // pStarFreespace,
0, // pStarCompact,
0, // pStarPreRalloc,
0, // pStarPostRalloc,
0, // pStarFree,
0, // dereference_identity,
0, // pStarLoaded,
0, // pStarFlushed
0, // pStarCleanup
};
/**
Be sure to call "registerPageType(Pstar<...>::impl())" once for
each template instantiation that Stasis might encounter, even if a
particular binary might not use that instantiation. This must be
done before calling Tinit().
@see registerPageType() from Stasis.
*/
template<class COMPRESSOR, class TYPE>
page_impl
Pstar<COMPRESSOR, TYPE>::impl() {
page_impl ret = pstar_impl;
ret.page_type = plugin_id<Pstar<COMPRESSOR,TYPE>,COMPRESSOR,TYPE>();
ret.pageLoaded = pStarLoaded<COMPRESSOR, TYPE>;
ret.pageFlushed = pStarFlushed<COMPRESSOR, TYPE>;
ret.pageCleanup = pStarCleanup<COMPRESSOR, TYPE>;
return ret;
}
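/*
Hypothetical registration sketch (registerPageType() and Tinit() are the
Stasis entry points named above; the chosen instantiations are examples):

registerPageType(Pstar<For<int64_t>, int64_t>::impl());
registerPageType(Pstar<Rle<int64_t>, int64_t>::impl());
Tinit();
*/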
} // namespace rose
#endif // _ROSE_COMPRESSION_PSTAR_IMPL_H__

pstar.h (new file, 119 lines)

@@ -0,0 +1,119 @@
#ifndef _ROSE_COMPRESSION_PSTAR_H__
#define _ROSE_COMPRESSION_PSTAR_H__
#include <limits.h>
#include <stasis/page.h>
#include <stasis/constants.h>
#include "compression.h"
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
namespace rose {
template <class COMPRESSOR, class TYPE>
void pStarLoaded(Page *p);
template<class PAGEFORMAT, class COMPRESSOR, class TYPE>
inline plugin_id_t plugin_id();
template <class COMPRESSOR, class TYPE> class Pstar {
public:
// Initialize a new Pstar page
Pstar(int xid, Page *p): p_(p), plug_(COMPRESSOR(xid, p->memAddr)) {
*stasis_page_type_ptr(p) = plugin_id<Pstar<COMPRESSOR,TYPE>,COMPRESSOR,TYPE>();
*freespace_ptr() = (intptr_t)recordsize_ptr() - (intptr_t)p_->memAddr;
*recordsize_ptr() = sizeof(TYPE);
free_bytes_ = *freespace_ptr() - plug_.bytes_used() - plug_.max_overrun();
p->impl = this;
}
inline void pack() { };
/**
Append a new value to a page managed by pstar.
@param xid the transaction adding the data to the page
@param dat the value to be added to the page.
*/
slot_index_t append(int xid, const TYPE dat);
// @todo If we want to support multiple columns per page, then recordSize
// and recordType need to be handled by the compressor.
inline record_size_t recordType(int xid, slot_index_t slot) {
return *recordsize_ptr();
}
inline void recordType(int xid, slot_index_t slot,
record_size_t type) {
*recordsize_ptr() = type;
}
inline record_size_t recordLength(int xid, slot_index_t slot) {
return physical_slot_length(recordType(xid, slot));
}
/**
Read a value from a page managed by pstar.
@param xid the transaction reading the record.
@param buf scratch space for recordRead.
@return NULL if there is no such slot, or a pointer to the
value.
If a pointer is returned, it might point to the memory passed via
buf, or it might point to memory managed by the page
implementation. The return value will not be invalidated as long as
the following two conditions apply:
1) The page is pinned; loadPage() has been called, but releasePage()
has not been called.
2) The memory that buf points to has not been freed, or reused
in a more recent call to recordRead().
*/
inline TYPE * recordRead(int xid, slot_index_t slot, TYPE * buf) {
// byte_off_t except = 0;
TYPE * ret = plug_.recordRead(xid, slot, p_->memAddr, buf);
// if (ret == reinterpret_cast<TYPE*>(INVALID_SLOT)) { return 0; }
/* if (ret == reinterpret_cast<TYPE*>(EXCEPTIONAL)) {
return reinterpret_cast<TYPE*>(
&(p_->memAddr[except-recordLength(xid, rid.slot)]));
} */
return ret;
}
inline COMPRESSOR * compressor() { return &plug_; }
static page_impl impl();
static const plugin_id_t PAGE_FORMAT_ID = 0;
private:
// Load an existing Pstar page
Pstar(Page *p): p_(p), plug_(COMPRESSOR(p->memAddr)) {
free_bytes_ = *freespace_ptr() - plug_.bytes_used() - plug_.max_overrun();
}
inline byte_off_t * freespace_ptr() {
return reinterpret_cast<byte_off_t*>(p_->memAddr+USABLE_SIZE_OF_PAGE)-1;
}
inline record_size_t * recordsize_ptr() {
return reinterpret_cast<record_size_t*>(freespace_ptr())-1;
}
inline void page(Page * p) {
p_ = p;
plug_.mem(p->memAddr); // COMPRESSOR plugins expose mem(), not memAddr()
}
Page *p_;
COMPRESSOR plug_;
int free_bytes_;
friend void pStarLoaded<COMPRESSOR, TYPE>(Page *p);
};
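/*
Hypothetical construction sketch (not part of this check-in): format a
pinned, freshly allocated page p as a pstar page of RLE-compressed
int32_t values, then append to it and read the value back.

Pstar<Rle<int32_t>, int32_t> *ps =
new Pstar<Rle<int32_t>, int32_t>(xid, p);
slot_index_t s = ps->append(xid, 42); // NOSPACE once the page fills
int32_t scratch;
int32_t *v = ps->recordRead(xid, s, &scratch);
*/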
} // namespace rose
#endif // _ROSE_COMPRESSION_PSTAR_H__

rle-impl.h (new file, 76 lines)

@@ -0,0 +1,76 @@
#ifndef _ROSE_COMPRESSION_RLE_IMPL_H__
#define _ROSE_COMPRESSION_RLE_IMPL_H__
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
#include <assert.h>
#include "rle.h"
namespace rose {
/**
Store a new value using run-length encoding. If this value matches
the previous one, increment a counter. Otherwise, create a new
triple_t to hold the new value and its count. Most of the
complexity comes from dealing with integer overflow, and running
out of space.
*/
template <class TYPE>
inline slot_index_t
Rle<TYPE>::append(int xid, const TYPE dat,
byte_off_t* except, byte * exceptions, //char *exceptional,
int *free_bytes) {
int64_t ret;
ret = last_block_ptr()->index + last_block_ptr()->copies;
if (dat != last_block_ptr()->data ||
last_block_ptr()->copies == MAX_COPY_COUNT) {
// this key is not the same as the last one, or
// the block is full
*free_bytes -= sizeof(triple_t);
// Write the changes in our overrun space
triple_t *n = new_block_ptr();
n->index = ret;
n->copies = 1;
n->data = dat;
// Finalize the changes unless we're out of space
(*block_count_ptr()) += (*free_bytes >= 0);
} else if(ret == MAX_INDEX) {
// out of address space
*free_bytes = -1;
ret = NOSPACE;
} else {
// success; bump number of copies of this item, and return.
last_block_ptr()->copies++;
}
return (slot_index_t)ret;
}
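// Worked example (hypothetical values): after init_mem(), the region holds
// the single triple (index=0, copies=0, data=0). Appending 7, 7, 7, 9
// leaves three triples: (0,0,0), (index=0, copies=3, data=7), and
// (index=3, copies=1, data=9). recordRead(xid, 1, ...) scans forward to
// the second triple and returns 7.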
template <class TYPE>
inline TYPE *
Rle<TYPE>::recordRead(int xid, slot_index_t slot, byte* exceptions,
TYPE * scratch) {
block_index_t n = nth_block_ptr(last_)->index <= slot ? last_ : 0;
// while (n < *block_count_ptr()) {
do {
triple_t * t = nth_block_ptr(n);
if (t->index <= slot && t->index + t->copies > slot) {
*scratch = t->data;
last_ = n;
return scratch;
}
n++;
} while (n < *block_count_ptr());
return 0;
}
} // namespace rose
#endif // _ROSE_COMPRESSION_RLE_IMPL_H__

rle.h (new file, 100 lines)

@@ -0,0 +1,100 @@
#ifndef _ROSE_COMPRESSION_RLE_H__
#define _ROSE_COMPRESSION_RLE_H__
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
#include <limits.h>
#include <ctype.h>
#include "pstar.h"
namespace rose {
template <class TYPE>
class Rle {
public:
typedef uint32_t block_index_t;
typedef uint16_t copy_count_t;
static const copy_count_t MAX_COPY_COUNT = USHRT_MAX;
struct triple_t {
slot_index_t index;
copy_count_t copies;
//byte foo[100]; // <-- Uncomment to test boundaries
TYPE data;
};
static const int PLUGIN_ID = 1;
inline void offset(TYPE off) { nth_block_ptr(0)->data = off; }
inline size_t max_overrun() { return sizeof(triple_t); }
/** @see For::append */
inline slot_index_t append(int xid, const TYPE dat,
byte_off_t* except, byte * exceptions,
int * free_bytes);
/** @see For::recordRead */
inline TYPE *recordRead(int xid, slot_index_t slot, byte *exceptions,
TYPE *scratch);
/**
This constructor initializes a new Rle region.
@param xid the transaction that created the new region.
*/
Rle(int xid, void * mem): mem_(mem), last_(0) {
*block_count_ptr() = 1;
triple_t * n = last_block_ptr();
n->index = 0;
n->copies = 0;
n->data = 0;
}
/**
This constructor is called when existing RLE data is read from
disk.
*/
Rle(void * mem): mem_(mem), last_(0) { }
Rle() : mem_(0), last_(0) {}
/**
@see For::bytes_used();
*/
inline byte_off_t bytes_used() {
return ((intptr_t)(last_block_ptr()+1))-(intptr_t)mem_;
}
inline void init_mem(void * mem) {
mem_=mem;
last_=0;
*block_count_ptr() = 1;
triple_t * n = nth_block_ptr(0);
n->index = 0;
n->copies = 0;
n->data = 0;
}
inline void mem(void * mem) {
mem_=mem;
last_=0;
}
private:
inline TYPE offset() { return nth_block_ptr(0)->data; }
inline block_index_t* block_count_ptr() {
return reinterpret_cast<block_index_t*>(mem_);
}
inline triple_t* nth_block_ptr(block_index_t n) {
return reinterpret_cast<triple_t*>(block_count_ptr()+1) + n;
}
inline triple_t* last_block_ptr() {
return nth_block_ptr(*block_count_ptr()-1);
}
inline triple_t* new_block_ptr() {
return nth_block_ptr(*block_count_ptr());
}
void * mem_;
block_index_t last_;
};
} // namespace rose
#endif // _ROSE_COMPRESSION_RLE_H__

tuple.h (new file, 336 lines)

@@ -0,0 +1,336 @@
#ifndef _ROSE_COMPRESSION_TUPLE_H__
#define _ROSE_COMPRESSION_TUPLE_H__
// Copyright 2007 Google Inc. All Rights Reserved.
// Author: sears@google.com (Rusty Sears)
/**
@file Implementation of tuples (Tuple) and dispatch routines for
column-wide compression (PluginDispatcher).
*/
#include <limits.h>
#include <ctype.h>
#include "compression.h"
#include "pstar-impl.h"
#include "multicolumn.h"
namespace rose {
template <class TUPLE> class Multicolumn;
template<class TYPE> class Tuple;
/**
PluginDispatcher essentially just wraps calls to compressors in
switch statements.
It has a number of deficiencies:
1) Performance. The switch statement is the main CPU bottleneck
for both of the current compression schemes.
2) PluginDispatcher has to "know" about all compression
algorithms and all data types that it may encounter.
This approach has one advantage: it doesn't preclude other
(templatized) implementations that hardcode schema formats at
compile time.
Performance could be partially addressed by using a blocking append
algorithm:
A Queue up multiple append requests (or precompute read requests)
when appropriate.
B Before appending, calculate a lower (pessimistic) bound on the
number of inserted tuples that can fit in the page:
n = (free bytes) / (maximum space per tuple)
C Compress n tuples from each column at a time. Only evaluate the
switch statement once for each column.
D Repeat steps B and C until n is below some threshold, then
revert to the current behavior.
Batching read requests is simpler, and would be useful for
sequential scans over the data.
*/
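/*
Sketch of step B's lower bound (hypothetical; not implemented in this
check-in). The worst-case space for one append is sizeof(TYPE) +
sizeof(delta_t) for a FOR column (a delta plus a possible exception) and
sizeof(triple_t) for an RLE column, so with free_bytes bytes remaining,

n = free_bytes / (sum of the per-column worst cases)

tuples can always be appended without re-checking for space.
*/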
class PluginDispatcher{
public:
#define dispatchSwitch(col,cases,...) \
static const int base = USER_DEFINED_PAGE(0) + 2 * 2 * 4;\
switch(plugin_ids_[col]-base) { \
cases(0, For<uint8_t>, col,uint8_t, __VA_ARGS__); \
cases(1, For<uint16_t>,col,uint16_t,__VA_ARGS__); \
cases(2, For<uint32_t>,col,uint32_t,__VA_ARGS__); \
cases(3, For<uint64_t>,col,uint64_t,__VA_ARGS__); \
cases(4, For<int8_t>, col,int8_t, __VA_ARGS__); \
cases(5, For<int16_t>, col,int16_t, __VA_ARGS__); \
cases(6, For<int32_t>, col,int32_t, __VA_ARGS__); \
cases(7, For<int64_t>, col,int64_t, __VA_ARGS__); \
cases(8, Rle<uint8_t>, col,uint8_t, __VA_ARGS__); \
cases(9, Rle<uint16_t>,col,uint16_t,__VA_ARGS__); \
cases(10,Rle<uint32_t>,col,uint32_t,__VA_ARGS__); \
cases(11,Rle<uint64_t>,col,uint64_t,__VA_ARGS__); \
cases(12,Rle<int8_t>, col,int8_t, __VA_ARGS__); \
cases(13,Rle<int16_t>, col,int16_t, __VA_ARGS__); \
cases(14,Rle<int32_t>, col,int32_t, __VA_ARGS__); \
cases(15,Rle<int64_t>, col,int64_t, __VA_ARGS__); \
default: abort(); \
};
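// For example, in recordRead() below, the switch arm selected for a column
// whose plugin id maps to offset 8 expands (roughly) to:
// case 8: { ret = ((Rle<uint8_t>*)plugins_[col])
// ->recordRead(xid,slot,exceptions,(uint8_t*)scratch); } break;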
#define caseAppend(off,plug_type,col,type,fcn,ret,xid,dat,...) \
case off: { \
ret = ((plug_type*)plugins_[col])->fcn(xid,*(type*)dat,__VA_ARGS__); } break
#define caseSetPlugin(off,plug_type,col,type,m) \
case off: { plugins_[col] = new plug_type(m); } break
#define caseDelPlugin(off,plug_type,col,type,m) \
case off: { delete (plug_type*)plugins_[col]; } break
#define caseRead(off,plug_type,col,type,m,ret,fcn,xid,slot,except,scratch) \
case off: { ret = ((plug_type*)plugins_[col])->fcn(xid,slot,except,(type*)scratch); } break
#define caseNoArg(off,plug_type,col,type,m,ret,fcn) \
case off: { ret = ((plug_type*)plugins_[col])->fcn(); } break
#define caseInitMem(off,plug_type,col,type,m) \
case off: { ((plug_type*)plugins_[col])->init_mem(m); } break
#define caseMem(off,plug_type,col,type,m) \
case off: { ((plug_type*)plugins_[col])->mem(m); } break
#define caseCompressor(off,plug_type,col,type,nil) \
case off: { ret = (plug_type*)plugins_[col]; } break
inline slot_index_t recordAppend(int xid, column_number_t col,
const void *dat, byte_off_t* except,
byte *exceptions, int *free_bytes) {
slot_index_t ret;
dispatchSwitch(col,caseAppend,append,ret,xid,dat,except,exceptions,
free_bytes);
return ret;
}
inline void *recordRead(int xid, byte *mem, column_number_t col,
slot_index_t slot, byte* exceptions, void *scratch) {
void * ret;
dispatchSwitch(col,caseRead,mem,ret,recordRead,xid,slot,exceptions,scratch);
return ret;
}
inline byte_off_t bytes_used(column_number_t col) {
byte_off_t ret;
dispatchSwitch(col,caseNoArg,mem,ret,bytes_used);
return ret;
}
inline void init_mem(byte * mem, column_number_t col) {
dispatchSwitch(col,caseInitMem,mem);
}
inline void mem(byte * mem, column_number_t col) {
dispatchSwitch(col,caseMem,mem);
}
inline void * compressor(column_number_t col) {
void * ret;
dispatchSwitch(col,caseCompressor,0);
return ret;
}
PluginDispatcher(column_number_t column_count) :
column_count_(column_count), plugin_ids_(new plugin_id_t[column_count]), plugins_(new void*[column_count]) {
for(column_number_t i = 0; i < column_count; i++) {
plugin_ids_[i] = 0;
}
}
PluginDispatcher(int xid, byte *mem,column_number_t column_count, plugin_id_t * plugins) :
column_count_(column_count), plugin_ids_(new plugin_id_t[column_count]), plugins_(new void*[column_count]) {
for(column_number_t i = 0; i < column_count; i++) {
plugin_ids_[i] = 0;
set_plugin(mem,i,plugins[i]);
}
}
inline void set_plugin(byte *mem,column_number_t c, plugin_id_t p) {
if(plugin_ids_[c]) {
dispatchSwitch(c,caseDelPlugin,0);
}
plugin_ids_[c] = p;
dispatchSwitch(c,caseSetPlugin,mem);
}
~PluginDispatcher() {
for(column_number_t i = 0; i < column_count_; i++) {
dispatchSwitch(i,caseDelPlugin,0);
}
delete[] plugin_ids_;
delete[] plugins_;
}
#undef caseAppend
#undef caseSetPlugin
#undef caseDelPlugin
#undef caseRead
#undef caseNoArg
#undef caseInitMem
#undef caseMem
#undef caseCompressor
private:
column_number_t column_count_;
plugin_id_t * plugin_ids_;
void ** plugins_;
};
template<class TYPE>
class Tuple {
public:
explicit Tuple(column_number_t count) : count_(count),
cols_(new TYPE[count]),
byteArray_(new byte[sizeof(count_)+count_*sizeof(TYPE)]) {}
/*explicit Tuple(byte* b) : count_(*(column_number_t*)b),
cols_(new TYPE[count_]),
byteArray_(new byte[sizeof(count_)+count_*sizeof(TYPE)]) {
memcpy(cols_,b+sizeof(column_number_t), sizeof(TYPE)*count_);
} */
explicit Tuple(Tuple& t) : count_(t.count_), cols_(new TYPE[count_]),
byteArray_(new byte[sizeof(count_)+count_*sizeof(TYPE)]) {
for(column_number_t c = 0; c < count_; c++) {
cols_[c] = t.cols_[c];
}
}
Tuple(TYPE t) : count_(0), cols_(new TYPE[1]),
byteArray_(new byte[sizeof(count_)+sizeof(TYPE)]) {
cols_[0] = t;
}
/* Tuple(Tuple *t) : count_(t->count_),cols_(new TYPE[count_]) {
for(column_number_t c = 0; c < count_; c++) {
cols_[c] = t->cols_[c];
}
} */
inline ~Tuple() { delete[] cols_; delete[] byteArray_; }
inline TYPE * set(column_number_t col,void* val) {
cols_[col] = *(TYPE*)val;
return (TYPE*)val;
}
inline TYPE * get(column_number_t col) const {
return &(cols_[col]);
}
inline column_number_t column_count() const {
return count_;
}
inline byte_off_t column_len(column_number_t col) const {
return sizeof(TYPE);
}
/* inline void fromByteArray(byte * b) {
assert(count_ == *(column_number_t*)b);
// memcpy(cols_,b+sizeof(column_number_t),sizeof(TYPE)*count_);
TYPE *newCols = (int*)(b + sizeof(column_number_t));
for(column_number_t i = 0; i < count_; i++) {
cols_[i] = newCols[i];
}
} */
inline byte* toByteArray() {
byte* ret = byteArray_;
memcpy(ret, &count_, sizeof(count_));
memcpy(ret+sizeof(count_), cols_, count_ * sizeof(TYPE));
return ret;
}
/* inline operator const byte * () {
return toByteArray();
} */
inline operator TYPE () {
return cols_[0]; //*get(0);
}
/* inline operator TYPE () {
assert(count_ == 0);
return cols_[0];
} */
static inline size_t sizeofBytes(column_number_t cols) {
return sizeof(column_number_t) + cols * sizeof(TYPE);
}
static const int TUPLE_ID = 0;
/* inline bool operator==(Tuple *t) {
return *this == *t;
} */
inline bool operator==(Tuple &t) {
//if(t.count_ != count_) return 0;
for(column_number_t i = 0; i < count_; i++) {
if(cols_[i] != t.cols_[i]) { return 0;}
}
return 1;
}
inline bool operator<(Tuple &t) {
//if(t.count_ != count_) return 0;
// Lexicographic order; decide on the first column that differs.
for(column_number_t i = 0; i < count_; i++) {
if(cols_[i] < t.cols_[i]) { return 1;}
if(t.cols_[i] < cols_[i]) { return 0;}
}
return 0;
}
/* inline bool operator==(TYPE val) {
assert(count_ == 1);
return cols_[0] == val;
}*/
class iterator {
public:
inline iterator(column_number_t c, TYPE const *const *const dataset, int offset) :
c_(c),
dat_(dataset),
off_(offset),
scratch_(c_) {}
inline explicit iterator(const iterator &i) : c_(i.c_), dat_(i.dat_), off_(i.off_),
scratch_(c_) {}
inline Tuple<TYPE>& operator*() {
for(column_number_t i = 0; i < c_; i++) {
scratch_.set(i,(void*)&dat_[i][off_]);
}
return scratch_;
}
inline bool operator==(const iterator &a) const {
//assert(dat_==a.dat_ && c_==a.c_);
return (off_==a.off_);
}
inline bool operator!=(const iterator &a) const {
//assert(dat_==a.dat_ && c_==a.c_);
return (off_!=a.off_);
}
inline void operator++() { off_++; }
inline void operator--() { off_--; }
inline void operator+=(int i) { abort(); }
inline int operator-(iterator&i) {
return off_ - i.off_;
}
inline void operator=(iterator &i) {
assert(c_==i.c_);
assert(dat_==i.dat_);
off_=i.off_;
}
inline void offset(int off) {
off_=off;
}
private:
column_number_t c_;
TYPE const * const * dat_;
int off_;
Tuple<TYPE> scratch_;
};
private:
Tuple() { abort(); }
explicit Tuple(const Tuple& t) { abort(); }
column_number_t count_;
TYPE * const cols_;
byte * byteArray_;
};
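/*
Hypothetical usage sketch: a three-column tuple of int32_t values.

Tuple<int32_t> t(3);
int32_t a = 1, b = 2, c = 3;
t.set(0, &a);
t.set(1, &b);
t.set(2, &c);
assert(*t.get(1) == 2);
byte *serialized = t.toByteArray(); // count_ followed by the column values
*/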
} // namespace rose
#endif // _ROSE_COMPRESSION_TUPLE_H__