2004-07-06 01:22:18 +00:00
|
|
|
#include <config.h>
|
|
|
|
#include <lladd/common.h>
|
|
|
|
|
2005-01-20 21:19:47 +00:00
|
|
|
#include <lladd/operations.h>
|
2004-07-06 01:22:18 +00:00
|
|
|
#include <lladd/transactional.h>
|
2004-06-24 21:10:31 +00:00
|
|
|
#include <lladd/bufferManager.h>
|
2004-06-30 01:09:57 +00:00
|
|
|
#include "../blobManager.h"
|
2004-07-23 20:21:44 +00:00
|
|
|
#include "../page.h"
|
2004-08-17 01:46:17 +00:00
|
|
|
#include "../page/slotted.h"
|
|
|
|
|
|
|
|
#include <assert.h>
|
2004-06-24 21:10:31 +00:00
|
|
|
/**
|
2004-07-20 03:40:57 +00:00
|
|
|
@file
|
|
|
|
|
2004-06-24 21:10:31 +00:00
|
|
|
Implementation of Talloc() as an operation
|
|
|
|
|
|
|
|
This is a bit strange compared to other operations, as it happens
|
|
|
|
in two phases. The buffer manager reserves space for a record
|
|
|
|
before the log entry is allocated. Then, the recordid of this
|
|
|
|
space is written to the log. Finally, alloc tells bufferManager
|
|
|
|
that it will use the space.
|
|
|
|
|
|
|
|
@todo Currently, if the system crashes during an alloc, (before the
|
|
|
|
log is flushed, but after bufferManager returns a rid), then the
|
|
|
|
space allocated during the crash is leaked. This doesn't seem to be
|
2004-06-28 22:48:02 +00:00
|
|
|
too big of a deal, but it should be fixed someday. A more serious
|
|
|
|
problem results from crashes during blob allocation.
|
2004-07-20 03:40:57 +00:00
|
|
|
|
|
|
|
@ingroup OPERATIONS
|
|
|
|
|
|
|
|
$Id$
|
2004-06-24 21:10:31 +00:00
|
|
|
|
|
|
|
*/
|
|
|
|
|
2004-07-23 20:21:44 +00:00
|
|
|
/**
 * Redo function for OPERATION_ALLOC (phase two of Talloc()).
 *
 * The space was already reserved by the buffer manager before this log
 * entry existed; here we commit to that reservation, dispatching on
 * whether the record lives in the blob store or in a slotted page.
 *
 * Always returns 0.
 */
static int operate(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
  /* A record is a blob iff its size is at or above the blob threshold
     and it is not the BLOB_SLOT sentinel. */
  const int is_blob = (rid.size >= BLOB_THRESHOLD_SIZE) && (rid.size != BLOB_SLOT);

  if(is_blob) {
    allocBlob(xid, p, lsn, rid);
  } else {
    slottedPostRalloc(p, lsn, rid);
  }

  return 0;
}
|
|
|
|
|
2004-10-06 06:08:09 +00:00
|
|
|
/** @todo Currently, we leak empty pages on dealloc. */
|
2004-07-23 20:21:44 +00:00
|
|
|
/**
 * Undo function for OPERATION_ALLOC / redo for OPERATION_DEALLOC:
 * returns the record's slot to its page.
 *
 * Always returns 0.
 */
static int deoperate(int xid, Page * p, lsn_t lsn, recordid rid, const void * dat) {
  /* Sanity check: the rid must refer to the page we were handed. */
  assert(p->id == rid.page);
  slottedDeRalloc(p, lsn, rid);
  return 0;
}
|
|
|
|
|
|
|
|
/**
 * Redo function for OPERATION_REALLOC: re-allocates a record that a
 * (now undone) Tdealloc() freed, then restores its preimage from the
 * log payload.
 *
 * Always returns 0.
 */
static int reoperate(int xid, Page *p, lsn_t lsn, recordid rid, const void * dat) {
  const int is_blob = (rid.size >= BLOB_THRESHOLD_SIZE) && (rid.size != BLOB_SLOT);

  if(is_blob) {
    /* Blob space is not reused yet; only re-allocate the on-page
       blob_record_t that points into the blob store. */
    rid.size = sizeof(blob_record_t);
  }

  slottedPostRalloc(p, lsn, rid);
  /** @todo dat should be the pointer to the space in the blob store. */
  writeRecord(xid, p, lsn, rid, dat);

  return 0;
}
|
|
|
|
|
2004-10-06 06:08:09 +00:00
|
|
|
/* Serializes Talloc()'s reserve-then-log sequence for slotted records.
   Statically initialized so the mutex is valid even if an allocation
   path runs before getAlloc() registers the operation (getAlloc()'s
   pthread_mutex_init() then merely re-applies the default attributes). */
static pthread_mutex_t talloc_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
|
2004-06-24 21:10:31 +00:00
|
|
|
Operation getAlloc() {
|
2004-10-06 06:08:09 +00:00
|
|
|
pthread_mutex_init(&talloc_mutex, NULL);
|
2004-06-24 21:10:31 +00:00
|
|
|
Operation o = {
|
|
|
|
OPERATION_ALLOC, /* ID */
|
|
|
|
0,
|
2004-10-18 18:24:54 +00:00
|
|
|
OPERATION_DEALLOC, /* OPERATION_NOOP, */
|
2004-06-24 21:10:31 +00:00
|
|
|
&operate
|
|
|
|
};
|
|
|
|
return o;
|
|
|
|
}
|
|
|
|
|
2004-06-28 21:10:10 +00:00
|
|
|
|
2004-10-02 07:29:34 +00:00
|
|
|
Operation getDealloc() {
|
|
|
|
Operation o = {
|
|
|
|
OPERATION_DEALLOC,
|
|
|
|
SIZEOF_RECORD,
|
2004-10-18 18:24:54 +00:00
|
|
|
OPERATION_REALLOC, /* OPERATION_NOOP, */
|
2004-10-02 07:29:34 +00:00
|
|
|
&deoperate
|
|
|
|
};
|
|
|
|
return o;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This is only used to undo deallocs. */
|
|
|
|
Operation getRealloc() {
|
|
|
|
Operation o = {
|
|
|
|
OPERATION_REALLOC,
|
|
|
|
0,
|
|
|
|
OPERATION_NOOP,
|
|
|
|
&reoperate
|
|
|
|
};
|
|
|
|
return o;
|
|
|
|
}
|
|
|
|
|
2004-07-06 01:22:18 +00:00
|
|
|
recordid Talloc(int xid, long size) {
|
2004-06-24 21:10:31 +00:00
|
|
|
recordid rid;
|
2004-10-06 06:08:09 +00:00
|
|
|
Page * p = NULL;
|
2005-01-31 01:29:52 +00:00
|
|
|
if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
|
2005-01-28 03:32:17 +00:00
|
|
|
/**@todo is it OK that Talloc doesn't pin the page when a blob is alloced?*/
|
2004-06-30 01:09:57 +00:00
|
|
|
rid = preAllocBlob(xid, size);
|
|
|
|
} else {
|
2004-10-06 06:08:09 +00:00
|
|
|
pthread_mutex_lock(&talloc_mutex);
|
|
|
|
rid = slottedPreRalloc(xid, size, &p);
|
|
|
|
assert(p != NULL);
|
2004-06-30 01:09:57 +00:00
|
|
|
}
|
2004-06-24 21:10:31 +00:00
|
|
|
|
|
|
|
Tupdate(xid,rid, NULL, OPERATION_ALLOC);
|
2004-10-06 06:08:09 +00:00
|
|
|
|
|
|
|
if(p != NULL) {
|
|
|
|
/* release the page that preAllocBlob pinned for us. */
|
|
|
|
|
|
|
|
/* @todo alloc.c pins multiple pages -> Will deadlock with small buffer sizes.. */
|
|
|
|
releasePage(p);
|
|
|
|
pthread_mutex_unlock(&talloc_mutex);
|
|
|
|
|
|
|
|
}
|
2004-06-24 21:10:31 +00:00
|
|
|
|
|
|
|
return rid;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2005-01-28 03:32:17 +00:00
|
|
|
/**
 * Transactionally allocates a record of the given size on a specific
 * page, mirroring Talloc() but with a caller-chosen page.
 *
 * @param xid  transaction id
 * @param page the page to allocate on
 * @param size record size in bytes; blob-sized requests are routed to
 *             the blob store
 * @return the new recordid; on the slotted path, a rid with size == -1
 *         signals that the page could not satisfy the request
 */
recordid TallocFromPage(int xid, long page, long size) {
  recordid rid;

  Page * p = NULL;
  if(size >= BLOB_THRESHOLD_SIZE && size != BLOB_SLOT) {
    rid = preAllocBlobFromPage(xid, page, size);
  } else {
    /* talloc_mutex is held from here until the page is released below
       (or until the early-return failure path unlocks it). */
    pthread_mutex_lock(&talloc_mutex);
    rid = slottedPreRallocFromPage(xid, page, size, &p);
    if(p == NULL) {
      /* Allocation failed; the failure is signalled via rid.size. */
      assert(rid.size == -1);
      pthread_mutex_unlock(&talloc_mutex);
      return rid;
    }
  }
  Tupdate(xid,rid, NULL, OPERATION_ALLOC);

  if(p != NULL) {
    /* release the page that preRallocFromPage pinned for us. */
    /* @todo alloc.c pins multiple pages -> Will deadlock with small buffer sizes.. */
    releasePage(p);
    pthread_mutex_unlock(&talloc_mutex);
  }

  return rid;
}
|
|
|
|
|
2004-06-24 21:10:31 +00:00
|
|
|
/**
 * Transactionally deallocates a record.
 *
 * The record's preimage is captured and logged with the dealloc so
 * that reoperate() can restore it if the dealloc is undone.
 *
 * @param xid transaction id
 * @param rid the record to free; rid.size must be its true size
 */
void Tdealloc(int xid, recordid rid) {
  void * preimage = malloc(rid.size);
  /* readRecord() writes rid.size bytes through preimage; never pass it
     an unchecked allocation. */
  assert(preimage != NULL);
  Page * p = loadPage(rid.page);
  readRecord(xid, p, rid, preimage);
  /** @todo race in Tdealloc; do we care, or is this something that the log manager should cope with? */
  Tupdate(xid, rid, preimage, OPERATION_DEALLOC);
  releasePage(p);
  free(preimage);
}
|
2004-12-01 01:26:25 +00:00
|
|
|
|
|
|
|
/**
 * Returns the type of the given record (as reported by
 * getRecordType()), pinning its page only for the duration of the
 * lookup.
 */
int TrecordType(int xid, recordid rid) {
  Page * p = loadPage(rid.page);
  const int type = getRecordType(xid, p, rid);
  releasePage(p);
  return type;
}
|
2005-01-28 03:32:17 +00:00
|
|
|
|
|
|
|
/**
 * Returns the size of the given record (as reported by
 * getRecordSize()), pinning its page only for the duration of the
 * lookup.
 */
int TrecordSize(int xid, recordid rid) {
  Page * p = loadPage(rid.page);
  const int size = getRecordSize(xid, p, rid);
  releasePage(p);
  return size;
}
|
|
|
|
|
|
|
|
int TrecordsInPage(int xid, int pageid) {
|
|
|
|
Page * p = loadPage(pageid);
|
|
|
|
readlock(p->rwlatch, 187);
|
|
|
|
int ret = *numslots_ptr(p);
|
|
|
|
unlock(p->rwlatch);
|
|
|
|
releasePage(p);
|
|
|
|
return ret;
|
|
|
|
}
|