diff --git a/benchmarks/lsn_bench_common.h b/benchmarks/lsn_bench_common.h
index 5d322a5..182f3a1 100644
--- a/benchmarks/lsn_bench_common.h
+++ b/benchmarks/lsn_bench_common.h
@@ -9,14 +9,14 @@
 
 void alloc_rids(long long num_rids, recordid ** slow, recordid ** fast) {
   int xid = Tbegin();
 
-  byte * old = stasis_malloc(PAGE_SIZE, byte);
-  byte * new = stasis_malloc(PAGE_SIZE, byte);
+  byte * old_page = stasis_malloc(PAGE_SIZE, byte);
+  byte * new_page = stasis_malloc(PAGE_SIZE, byte);
 
   for(long long i = 0; i < num_rids; ) {
     pageid_t pid = TpageAlloc(xid);
     Page * p = loadPage(xid, pid);
     writelock(p->rwlatch,0);
-    memcpy(old, p->memAddr, PAGE_SIZE);
+    memcpy(old_page, p->memAddr, PAGE_SIZE);
     stasis_page_slotted_lsn_free_initialize_page(p);
     while(i < num_rids && (
@@ -35,14 +35,14 @@ void alloc_rids(long long num_rids, recordid ** slow, recordid ** fast) {
       assert((*slow)[i].size != -1);
       i++;
     }
-    memcpy(new, p->memAddr, PAGE_SIZE);
-    memcpy(p->memAddr, old, PAGE_SIZE);
+    memcpy(new_page, p->memAddr, PAGE_SIZE);
+    memcpy(p->memAddr, old_page, PAGE_SIZE);
     unlock(p->rwlatch);
     releasePage(p);
-    TpageSet(xid, pid, new);
+    TpageSet(xid, pid, new_page);
   }
 
-  free(old);
-  free(new);
+  free(old_page);
+  free(new_page);
   Tcommit(xid);
 }
diff --git a/src/apps/cht/cht_server.c b/src/apps/cht/cht_server.c
index 3a73006..696d30c 100644
--- a/src/apps/cht/cht_server.c
+++ b/src/apps/cht/cht_server.c
@@ -58,11 +58,11 @@ static state_name do_work(void * dfaSet, StateMachine * stateMachine, Message *
   {
   case CREATE:
     {
-      recordid new = ThashCreate(ht_xid, VARIABLE_LENGTH, VARIABLE_LENGTH);
+      recordid new_rid = ThashCreate(ht_xid, VARIABLE_LENGTH, VARIABLE_LENGTH);
       ThashInsert(ht_xid, app_state_cht->ht_ht,
                   (byte*)&(__header_ptr(m)->hashTable),
                   sizeof(clusterHashTable_t),
-                  (byte*)&new, sizeof(recordid));
+                  (byte*)&new_rid, sizeof(recordid));
       DEBUG("Created local slice of global hash table %d\n", (__header_ptr(m)->hashTable));
       //Tcommit(app_state_cht->ht_xid);
 
diff --git a/src/libdfa/monotree.c b/src/libdfa/monotree.c
index b6cce9f..914d93a 100644
--- a/src/libdfa/monotree.c
+++ b/src/libdfa/monotree.c
@@ -86,7 +86,7 @@ void init_MonoTree(MonoTree * rb, int size) {
 
 
 StateMachine * allocMachine(MonoTree * rb/*, state_machine_id id*/) {
-  StateMachine * new;
+  StateMachine * new_sm;
 
   if(rb->high_water_mark >= rb->size) {
     compactBuffer(rb);
@@ -94,24 +94,24 @@ StateMachine * allocMachine(MonoTree * rb/*, state_machine_id id*/) {
   if(rb->high_water_mark >= rb->size) {
     return (StateMachine *)0;
   }
-  new = &(rb->buffer[rb->high_water_mark]);
+  new_sm = &(rb->buffer[rb->high_water_mark]);
   rb->high_water_mark++;
-  new->machine_id = rb->next_id;
-  new->mutex = malloc(sizeof(pthread_mutex_t));
-  new->sleepCond = malloc(sizeof(pthread_cond_t));
+  new_sm->machine_id = rb->next_id;
+  new_sm->mutex = malloc(sizeof(pthread_mutex_t));
+  new_sm->sleepCond = malloc(sizeof(pthread_cond_t));
 
-  pthread_mutex_init(new->mutex, NULL);
-  pthread_cond_init(new->sleepCond, NULL);
+  pthread_mutex_init(new_sm->mutex, NULL);
+  pthread_cond_init(new_sm->sleepCond, NULL);
 
   rb->next_id++;
-  new->current_state = START_STATE;
+  new_sm->current_state = START_STATE;
 
-  return new;
+  return new_sm;
 }
 
 StateMachine * insertMachine(MonoTree * rb, state_machine_id id) {
   int new_index;
-  StateMachine * new;
+  StateMachine * new_sm;
   int insertion_point;
   /* allocMachine is much less expensive than insertMachine, so
      this check is probably worth the trouble
@@ -148,15 +148,15 @@ StateMachine * insertMachine(MonoTree * rb, state_machine_id id) {
     }
   }
 
-  new = &(rb->buffer[insertion_point]);
-  new->machine_id = id;
-  new->current_state = START_STATE;
-  new->mutex = malloc(sizeof(pthread_mutex_t));
-  new->sleepCond = malloc(sizeof(pthread_cond_t));
-  pthread_mutex_init(new->mutex, NULL);
-  pthread_cond_init(new->sleepCond, NULL);
+  new_sm = &(rb->buffer[insertion_point]);
+  new_sm->machine_id = id;
+  new_sm->current_state = START_STATE;
+  new_sm->mutex = malloc(sizeof(pthread_mutex_t));
+  new_sm->sleepCond = malloc(sizeof(pthread_cond_t));
+  pthread_mutex_init(new_sm->mutex, NULL);
+  pthread_cond_init(new_sm->sleepCond, NULL);
 
-  return new;
+  return new_sm;
 }
 
diff --git a/src/libdfa/smash.c b/src/libdfa/smash.c
index d7461c1..638b656 100644
--- a/src/libdfa/smash.c
+++ b/src/libdfa/smash.c
@@ -75,29 +75,29 @@ void * _getSmash (smash_t * smash, state_machine_id id) {
 }
 
 StateMachine * _insertSmash(smash_t * smash, state_machine_id id) {
-  StateMachine * new;
+  StateMachine * new_sm;
   if(smash->contents+1 == smash->size) {
     return NULL;
   }
   smash->contents++;
 
-  new = malloc(sizeof (StateMachine));
-  new->machine_id = id;
-  new->mutex = malloc(sizeof(pthread_mutex_t));
-  new->sleepCond = malloc(sizeof(pthread_cond_t));
-  new->pending = 0;
-  pthread_mutex_init(new->mutex, NULL);
-  pthread_cond_init(new->sleepCond, NULL);
+  new_sm = malloc(sizeof (StateMachine));
+  new_sm->machine_id = id;
+  new_sm->mutex = malloc(sizeof(pthread_mutex_t));
+  new_sm->sleepCond = malloc(sizeof(pthread_cond_t));
+  new_sm->pending = 0;
+  pthread_mutex_init(new_sm->mutex, NULL);
+  pthread_cond_init(new_sm->sleepCond, NULL);
 
-  new->current_state = START_STATE;
+  new_sm->current_state = START_STATE;
 
   /*  printf("Insert %ld\n", id); */
 
-  ThashInsert(smash->xid, smash->hash, (byte*)&id, sizeof(state_machine_id), (byte*)new, sizeof(StateMachine));
-  pblHtInsert(smash->memHash, &id, sizeof(state_machine_id), new);
+  ThashInsert(smash->xid, smash->hash, (byte*)&id, sizeof(state_machine_id), (byte*)new_sm, sizeof(StateMachine));
+  pblHtInsert(smash->memHash, &id, sizeof(state_machine_id), new_sm);
 
   /*  Tcommit(smash->xid);
       smash->xid = Tbegin(); */
 
-  return new;
+  return new_sm;
 }
diff --git a/src/pobj/hash.c b/src/pobj/hash.c
index 5090266..3306c5d 100644
--- a/src/pobj/hash.c
+++ b/src/pobj/hash.c
@@ -90,12 +90,12 @@ hash_insert (struct hash *h, unsigned long key, unsigned long val)
 {
     unsigned long bucket_mask = h->bucket_mask;
     int bucket_index = (int) (key & bucket_mask);
-    struct hash_item *new;
+    struct hash_item *new_item;
 
     debug_start ();
 
-    new = (struct hash_item *) XMALLOC (sizeof (struct hash_item));
-    if (! new) {
+    new_item = (struct hash_item *) XMALLOC (sizeof (struct hash_item));
+    if (! new_item) {
         debug ("allocation failed");
         debug_end ();
         return -1;
@@ -104,10 +104,10 @@ hash_insert (struct hash *h, unsigned long key, unsigned long val)
     debug ("inserting %lu->%lu (%p->%p) to bucket %d",
            key, val, (void *) key, (void *) val, bucket_index);
 
-    new->key = key;
-    new->val = val;
-    new->next = h->table[bucket_index];
-    h->table[bucket_index] = new;
+    new_item->key = key;
+    new_item->val = val;
+    new_item->next = h->table[bucket_index];
+    h->table[bucket_index] = new_item;
 
     debug_end ();
     return 0;
diff --git a/src/stasis/logger/filePool.c b/src/stasis/logger/filePool.c
index 31dcd36..256b366 100644
--- a/src/stasis/logger/filePool.c
+++ b/src/stasis/logger/filePool.c
@@ -564,19 +564,19 @@ int stasis_log_file_pool_truncate(struct stasis_log_t* log, lsn_t lsn) {
       fp->dead_filenames[dead_offset + i][0] = 0;
       strcat(fp->dead_filenames[dead_offset + i], fp->live_filenames[i]);
       strcat(fp->dead_filenames[dead_offset + i], "~");
-      char * old = build_path(fp->dirname, fp->live_filenames[i]);
-      char * new = build_path(fp->dirname, fp->dead_filenames[dead_offset + i]);
+      char * old_path = build_path(fp->dirname, fp->live_filenames[i]);
+      char * new_path = build_path(fp->dirname, fp->dead_filenames[dead_offset + i]);
       // TODO This is the only place where we hold the latch while going to disk.
       //      Rename should be fast, but we're placing a lot of faith in the filesystem.
-      int err = rename(old, new);
+      int err = rename(old_path, new_path);
       if(err) {
         perror("could not rename file");
         assert(err == -1);
         abort();
       }
       close(fp->ro_fd[i]);
-      free(old);
-      free(new);
+      free(old_path);
+      free(new_path);
     }
     fp->dead_count += chunk;
     for(int i = 0; i < (fp->live_count - chunk); i++) {
diff --git a/test/stasis/check_operations.c b/test/stasis/check_operations.c
index e585125..b64fe8f 100644
--- a/test/stasis/check_operations.c
+++ b/test/stasis/check_operations.c
@@ -574,8 +574,8 @@ START_TEST(operation_lsn_free) {
 
     Page * p = loadPage(xid,pid);
     stasis_page_slotted_lsn_free_initialize_page(p); // XXX hack!
-    byte * old = stasis_malloc(PAGE_SIZE, byte);
-    memcpy(old, p->memAddr, PAGE_SIZE);
+    byte * old_page = stasis_malloc(PAGE_SIZE, byte);
+    memcpy(old_page, p->memAddr, PAGE_SIZE);
     int fortyTwo = 42;
     for(int i = 0; i < 100; i++) {
       rid[i] = stasis_record_alloc_begin(xid, p, sizeof(int));
@@ -583,13 +583,13 @@
       stasis_record_write(xid, p, rid[i], (const byte*)&fortyTwo);
       stasis_page_lsn_write(xid, p, -1);
     }
-    byte * new = stasis_malloc(PAGE_SIZE, byte);
-    memcpy(new, p->memAddr, PAGE_SIZE);
-    memcpy(p->memAddr, old, PAGE_SIZE);
+    byte * new_page = stasis_malloc(PAGE_SIZE, byte);
+    memcpy(new_page, p->memAddr, PAGE_SIZE);
+    memcpy(p->memAddr, old_page, PAGE_SIZE);
     releasePage(p);
-    TpageSet(xid, pid, new);
-    free(old);
-    free(new);
+    TpageSet(xid, pid, new_page);
+    free(old_page);
+    free(new_page);
     Tcommit(xid);
   }
   {
@@ -638,8 +638,8 @@ START_TEST(operation_reorderable) {
 
    Page * p = loadPage(xid,pid);
    stasis_page_slotted_lsn_free_initialize_page(p); // XXX hack!
-    byte * old = stasis_malloc(PAGE_SIZE, byte);
-    memcpy(old, p->memAddr, PAGE_SIZE);
+    byte * old_page = stasis_malloc(PAGE_SIZE, byte);
+    memcpy(old_page, p->memAddr, PAGE_SIZE);
     int fortyTwo = 42;
     for(int i = 0; i < 100; i++) {
       rid[i] = stasis_record_alloc_begin(xid, p, sizeof(int));
@@ -647,13 +647,13 @@
       stasis_record_write(xid, p, rid[i], (const byte*)&fortyTwo);
       stasis_page_lsn_write(xid, p, -1);
     }
-    byte * new = stasis_malloc(PAGE_SIZE, byte);
-    memcpy(new, p->memAddr, PAGE_SIZE);
-    memcpy(p->memAddr, old, PAGE_SIZE);
+    byte * new_page = stasis_malloc(PAGE_SIZE, byte);
+    memcpy(new_page, p->memAddr, PAGE_SIZE);
+    memcpy(p->memAddr, old_page, PAGE_SIZE);
     releasePage(p);
-    TpageSet(xid, pid, new);
-    free(old);
-    free(new);
+    TpageSet(xid, pid, new_page);
+    free(old_page);
+    free(new_page);
     Tcommit(xid);
   }
   {
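The renames above look purely mechanical, and the apparent motivation is C++ compatibility: `new` is a reserved word in C++, so each of these identifiers breaks the build the moment the surrounding code (or a header such as benchmarks/lsn_bench_common.h) is compiled by a C++ compiler; the paired `old` variables are presumably renamed only to keep the names symmetrical. A minimal sketch of the failure mode, using hypothetical names (f, new_val) that are not part of the patch:

    int f(void) {
      int new = 42;       /* valid C, but a syntax error for any C++ compiler: `new' is a keyword */
      int new_val = 42;   /* the renamed identifier compiles as both C and C++ */
      return new + new_val;
    }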