More fixes to prevent tardis from running out of disk space.
parent e434c35203
commit 986ee57020
2 changed files with 27 additions and 11 deletions
@@ -172,23 +172,24 @@ int truncateNow() {
   lsn_t log_trunc = LogTruncationPoint();
   if((xact_rec_lsn - log_trunc) > MIN_INCREMENTAL_TRUNCATION) {
-    //printf("xact = %ld \t log = %ld\n", xact_rec_lsn, log_trunc);
+    //fprintf(stderr, "xact = %ld \t log = %ld\n", xact_rec_lsn, log_trunc);
     if((rec_lsn - log_trunc) > MIN_INCREMENTAL_TRUNCATION) {
-      fprintf(stderr, "Truncating now. rec_lsn = %ld, log_trunc = %ld\n", rec_lsn, log_trunc);
+      // fprintf(stderr, "Truncating now. rec_lsn = %ld, log_trunc = %ld\n", rec_lsn, log_trunc);
       fprintf(stderr, "Truncating to rec_lsn = %ld\n", rec_lsn);
       forcePageFile();
       LogTruncate(rec_lsn);
       return 1;
     } else {
       lsn_t flushed = LogFlushedLSN();
       if(flushed - log_trunc > 2 * TARGET_LOG_SIZE) {
-        fprintf(stderr, "Flushing dirty buffers: rec_lsn = %ld log_trunc = %ld flushed = %ld\n", rec_lsn, log_trunc, flushed);
+        //fprintf(stderr, "Flushing dirty buffers: rec_lsn = %ld log_trunc = %ld flushed = %ld\n", rec_lsn, log_trunc, flushed);
         dirtyPages_flush();

         page_rec_lsn = dirtyPages_minRecLSN();
         rec_lsn = page_rec_lsn < xact_rec_lsn ? page_rec_lsn : xact_rec_lsn;
+        rec_lsn = (rec_lsn < flushed_lsn) ? rec_lsn : flushed_lsn;

-        fprintf(stderr, "Truncating to rec_lsn = %ld\n", rec_lsn);
+        fprintf(stderr, "Flushed Dirty Buffers. Truncating to rec_lsn = %ld\n", rec_lsn);

         forcePageFile();
         LogTruncate(rec_lsn);
@@ -15,7 +15,8 @@
 #define LOG_NAME "check_bufferManager.log"
 #ifdef LONG_TEST
-#define THREAD_COUNT 100
+#define THREAD_COUNT 50

 #define NUM_PAGES (MAX_BUFFER_SIZE * 2) // Otherwise, we run out of disk cache, and it takes forever to complete...
+#define PAGE_MULT 10 // This tells the system to only use every 10'th page, allowing us to quickly check >2 GB, >4 GB safeness.
@@ -31,9 +32,10 @@
 #define READS_PER_THREAD (NUM_PAGES * 5)
 #define RECORDS_PER_THREAD (NUM_PAGES * 5)

 #endif

+#define MAX_TRANS_LENGTH 100 // Number of writes per transaction. Keeping this low allows truncation.

 void initializePages() {

   int i;
@@ -100,9 +102,13 @@ void * workerThreadWriting(void * q) {
   int offset = *(int*)q;
   recordid rids[RECORDS_PER_THREAD];

+  int xid = Tbegin();
+  int num_ops = 0;
+
   for(int i = 0 ; i < RECORDS_PER_THREAD; i++) {

-    rids[i] = Talloc(-1, sizeof(int));
+    rids[i] = Talloc(xid, sizeof(int));
     /* printf("\nRID:\t%d,%d\n", rids[i].page, rids[i].slot); */
     /* fflush(NULL); */
@@ -111,12 +117,19 @@ void * workerThreadWriting(void * q) {
     }

+    if(num_ops == MAX_TRANS_LENGTH) {
+      num_ops = 0;
+      Tcommit(xid);
+      xid = Tbegin();
+    } else {
+      num_ops++;
+    }
     /* sched_yield(); */
   }
   for(int i = 0; i < RECORDS_PER_THREAD; i++) {
     int val = (i * 10000) + offset;
     int k;
-    Page * p = loadPage(-1, rids[i].page);
+    Page * p = loadPage(xid, rids[i].page);

     assert(p->id == rids[i].page);
@@ -143,7 +156,7 @@ void * workerThreadWriting(void * q) {
     Page * p;


-    p = loadPage(-1, rids[i].page);
+    p = loadPage(xid, rids[i].page);

     readRecord(1, p, rids[i], &val);
@@ -161,6 +174,8 @@ void * workerThreadWriting(void * q) {
     /* sched_yield(); */
   }

+  Tcommit(xid);
+
   return NULL;
 }