#define _XOPEN_SOURCE 600

#include <stdlib.h>

#include "../page.h"
#include <lladd/operations/pageOperations.h>
#include <assert.h>
/*#include "../page/slotted.h"*/
#include "../page/header.h"
#include "../pageFile.h"
#include "../page/fixed.h"
#include <alloca.h>
#include <pthread.h>

static pthread_mutex_t pageAllocMutex;

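/** Run implementation for OPERATION_PAGE_SET (executed on update and
    on redo): installs the PAGE_SIZE byte image logged with the update
    over the entire page, then stamps the page with the log record's
    LSN.  (Registered via getPageSet(), below.) */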
int __pageSet(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
  memcpy(p->memAddr, d, PAGE_SIZE);
  pageWriteLSN(xid, p, lsn);
  return 0;
}

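/** Reads the entire PAGE_SIZE byte image of page pageid into memAddr.
    The read itself is not logged. */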
compensated_function int TpageGet(int xid, int pageid, byte *memAddr) {
  Page * q = 0;
  try_ret(compensation_error()) {
    q = loadPage(xid, pageid);
    memcpy(memAddr, q->memAddr, PAGE_SIZE);
  } end_ret(compensation_error());
  try_ret(compensation_error()) {
    releasePage(q);
  } end_ret(compensation_error());
  return 0;
}

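/** Transactionally overwrites the entire page pageid with the
    PAGE_SIZE byte image at memAddr, via OPERATION_PAGE_SET. */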
compensated_function int TpageSet(int xid, int pageid, byte * memAddr) {
  recordid rid;
  rid.page = pageid;
  rid.slot = 0;
  rid.size = 0;
  try_ret(compensation_error()) {
    Tupdate(xid, rid, memAddr, OPERATION_PAGE_SET);
  } end_ret(compensation_error());
  return 0;
}

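/* A minimal read-modify-write sketch using the two calls above
   (hypothetical caller; assumes an active transaction xid and a page
   pid obtained from TpageAlloc()):

     byte buf[PAGE_SIZE];
     TpageGet(xid, pid, buf);       // snapshot the current image
     memset(buf, 0, PAGE_SIZE);     // modify the private copy
     TpageSet(xid, pid, buf);       // log and install the new image

   Note that TpageSet physically logs whole-page images, so this is an
   expensive way to change a few bytes. */
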
/** @todo this should be dynamic. */
#define TALLOC_PAGE_REGION_SIZE 128 // 512K

/**
   This calls loadPage and releasePage directly, and bypasses the
   logger.
*/
compensated_function void pageOperationsInit() {

  regionsInit();

  boundary_tag t;
  recordid rid = {0, 0, sizeof(boundary_tag)};
  // Need to find a region with some free pages in it.
  Tread(-1, rid, &t);

  pthread_mutex_init(&pageAllocMutex, NULL);
}

/** @todo TpageAlloc / TpageDealloc + undo + page reuse is not
    multi-transaction / threadsafe.

    Example of the problem:

    T1                            T2
    dealloc(100)
      (a) list ptr 30 -> 100
      (b) p(100) nil -> 30
                                  alloc() -> 100  <- Can't allow this
                                                     to happen!
                                  list_ptr 100 -> 30
                                  alloc() -> 30
                                  list_ptr 30 -> 20
    abort();

    // Really just needs to remove 100 from the linked list.  Instead,
    // we use physical, value-based locking.

    list ptr 20 <- 30  <- Oops!  Page 30 is in use, and we lose the
                          rest of the freelist, starting at 20!

    The partial solution: dealloc() acquires a lock on the freelist
    until commit / abort.  If other transactions need to allocate
    while the lock is held, then they simply do not reuse pages.
    Since locking is not yet implemented, we require applications to
    manually serialize transactions that call Talloc() or Tdealloc().

    A better solution: defer the addition of 100 to the freelist
    until commit, and use a 'real' data structure, like a concurrent
    B-Tree.
*/

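/* Until freelist locking exists, callers must serialize allocation
   themselves.  A sketch of what that might look like (hypothetical
   application code; appAllocMutex is not part of the LLADD API):

     static pthread_mutex_t appAllocMutex = PTHREAD_MUTEX_INITIALIZER;

     pthread_mutex_lock(&appAllocMutex);
     int xid = Tbegin();
     int page = TpageAlloc(xid);
     Tcommit(xid);                  // hold the lock across commit
     pthread_mutex_unlock(&appAllocMutex);

   Holding the mutex until commit / abort prevents another transaction
   from reusing a page whose deallocation could still be rolled back. */
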
compensated_function int TpageDealloc(int xid, int pageid) {
  TregionDealloc(xid, pageid); // @todo inefficient hack!
  return 0;
}

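/** Allocates a single page.  @return the number of the newly
    allocated page. */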
compensated_function int TpageAlloc(int xid /*, int type */) {
  return TregionAlloc(xid, 1, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
}

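/** Run implementation for OPERATION_FIXED_PAGE_ALLOC: formats the
    page as a fixed-length-record page with r.size byte slots, then
    stamps the log record's LSN. */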
int __fixedPageAlloc(int xid, Page * p, lsn_t lsn, recordid r, const void * d) {
  fixedPageInitialize(p, r.size, recordsPerPage(r.size));
  pageWriteLSN(xid, p, lsn);
  return 0;
}

/**
   @return a recordid.  The page field contains the page that was
   allocated, the slot field contains the number of slots on the
   page, and the size field contains the size of each slot.
*/
recordid TfixedPageAlloc(int xid, int size) {
  int page = TpageAlloc(xid);
  recordid rid = {page, recordsPerPage(size), size};
  Tupdate(xid, rid, 0, OPERATION_FIXED_PAGE_ALLOC);
  return rid;
}

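/* Usage sketch (hypothetical; assumes the usual recordid convention
   that individual records are addressed by setting rid.slot to an
   index below the slot count returned above):

     recordid rid = TfixedPageAlloc(xid, sizeof(int));
     int nSlots = rid.slot;         // number of slots on the page
     rid.slot = 0;                  // address the page's first record
     int val = 42;
     Tset(xid, rid, &val);
*/
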
Operation getFixedPageAlloc() {
  Operation o = {
    OPERATION_FIXED_PAGE_ALLOC,
    0,                /* no log payload; Tupdate() is called with dat = 0 */
    OPERATION_NOOP,   /* undo is a no-op */
    &__fixedPageAlloc
  };
  return o;
}

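/** Allocates count contiguous pages.  @return the number of the
    first page in the run. */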
compensated_function int TpageAllocMany(int xid, int count /*, int type*/) {
  return TregionAlloc(xid, count, STORAGE_MANAGER_NAIVE_PAGE_ALLOC);
}

/** Safely allocating and freeing pages is surprisingly complex.  Here
    is a summary of the process:

    Alloc:

      obtain mutex
      choose a free page using in-memory data
      load the page to be used, and update the in-memory data.
        (obtains lock on loaded page)
      Tupdate() the page, zeroing it, and saving the old successor in
        the log.
      release the page (avoids deadlock in the next step)
      Tupdate() LLADD's header page (the first in the store file) with
        a new copy of the in-memory data, saving the old version in
        the log.
      release mutex

    Free:

      obtain mutex
      determine the current head of the freelist using in-memory data
      Tupdate() the page, initializing it to be a free page, and
        physically logging the old version
      release the page
      Tupdate() LLADD's header page with a new copy of the in-memory
        data, saving the old version in the log
      release mutex
*/

/** Frees a page by zeroing it, setting its type to LLADD_FREE_PAGE,
    and setting the successor pointer.  This operation physically logs
    a whole page, which makes it expensive.  Doing so is necessary in
    general, but it is possible that application-specific logic could
    avoid the physical logging here.

    Instead, we should just record the fact that the page was freed
    somewhere.  That way, we don't need to read the page in, or write
    out information about it.  If we lock the page against
    reallocation until the current transaction commits, then we're
    fine.
*/

Operation getPageSet() {
  Operation o = {
    OPERATION_PAGE_SET,
    PAGE_SIZE, /* This is the size of the old page, which is logged for undo purposes. */
    /*OPERATION_PAGE_SET, */ NO_INVERSE_WHOLE_PAGE,
    &__pageSet
  };
  return o;
}