concurrent hashtable seems to be working; still need to work out atomicity primitives (e.g., options to leave the bucket locked on success)
This commit is contained in:
parent 1cf16f62f2
commit f5e3ab2d91
7 changed files with 384 additions and 2 deletions
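Editor's note: the atomicity primitives mentioned in the commit message are not part of this change. As a purely hypothetical sketch (names invented here, not in the commit) of what "leave the bucket locked on success" could look like on top of the API added below:

/* HYPOTHETICAL, not part of this commit: a test-and-set variant that
 * returns with the winning bucket still latched, so the caller can finish
 * initializing the value before any other thread can observe it, then
 * releases the latch with an explicit unlock call. */
void * hashtable_test_and_set_lock(hashtable_t *ht, pageid_t p, void *val);
void   hashtable_unlock(hashtable_t *ht, pageid_t p);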
@@ -4,6 +4,7 @@ CREATE_EXECUTABLE(smallLogEntry)
 CREATE_EXECUTABLE(noopTransactions)
 CREATE_EXECUTABLE(pinSamePage)
 CREATE_EXECUTABLE(pinDifferentPages)
+CREATE_EXECUTABLE(hashtableDifferentPages)
 CREATE_EXECUTABLE(readLatch)
 CREATE_EXECUTABLE(readLatches)
 CREATE_EXECUTABLE(writeLatch)
benchmarks/multicore/hashtableDifferentPages.c (Normal file, 54 lines)
@@ -0,0 +1,54 @@
/*
 * hashtableDifferentPages.c
 *
 *  Created on: Oct 19, 2009
 *      Author: sears
 */
#include <stasis/concurrentHash.h>
#include <stasis/transactional.h>
#include <pthread.h>
#include <stdio.h>


char * usage = "%s numthreads numops\n";

unsigned long numops;

hashtable_t * ht;

static void* worker(void* arg) {
  pageid_t pid = *(pageid_t*)arg;
  hashtable_insert(ht, pid, &pid);
  for(unsigned long i = 0; i < numops; i++) {
    void * ptr = hashtable_lookup(ht, pid);
    assert(ptr == &pid);
//    Page * p = loadPage(-1, pid);
//    releasePage(p);
  }

  return 0;
}

int main(int argc, char * argv[]) {
  if(argc != 3) { printf(usage, argv[0]); abort(); }
  char * endptr;
  unsigned long numthreads = strtoul(argv[1], &endptr, 10);
  if(*endptr != 0) { printf(usage, argv[0]); abort(); }
  numops = strtoul(argv[2], &endptr, 10) / numthreads;
  if(*endptr != 0) { printf(usage, argv[0]); abort(); }

  pthread_t workers[numthreads];
  pageid_t pids[numthreads];

  ht = hashtable_init(numthreads * 10, 0);

  for(int i = 0; i < numthreads; i++) {
    pids[i] = i*2;
    pthread_create(&workers[i], 0, worker, &pids[i]);
  }
  for(int i = 0; i < numthreads; i++) {
    pthread_join(workers[i], 0);
  }

  hashtable_deinit(ht);
}
@@ -1,4 +1,4 @@
-ADD_LIBRARY(stasis crc32.c redblack.c lhtable.c rw.c doubleLinkedList.c
+ADD_LIBRARY(stasis crc32.c redblack.c lhtable.c concurrentHash.c rw.c doubleLinkedList.c
            common.c flags.c stats.c bufferManager.c
            linkedlist.c operations.c pageHandle.c
            bufferManager/legacy/pageFile.c
src/stasis/concurrentHash.c (Normal file, 141 lines)
@@ -0,0 +1,141 @@
/*
 * concurrenthash.c
 *
 *  Created on: Oct 15, 2009
 *      Author: sears
 */
#include <stasis/common.h>
#include <assert.h>

typedef struct bucket {
  pageid_t key;
  pthread_mutex_t mut;
  void * val;
} bucket_t;

typedef struct hashtable {
  bucket_t* buckets;
  pageid_t maxbucketid;
  pageid_t numentries;
  char tracknum;
} hashtable_t;

hashtable_t * hashtable_init(pageid_t size, int tracknum) {
  pageid_t newsize = 1;
  for(int i = 0; size; i++) {
    size /= 2;
    newsize *= 2;
  }
  hashtable_t *ht = malloc(sizeof(*ht));

  ht->maxbucketid = (newsize) - 1;
  ht->buckets = calloc(ht->maxbucketid+1, sizeof(bucket_t));
  for(pageid_t i = 0; i <= ht->maxbucketid; i++) {
    pthread_mutex_init(&ht->buckets[i].mut, 0);
  }

  ht->numentries = 0;
  ht->tracknum = (tracknum == 0 ? 0 : 1);

  return ht;
}
void hashtable_deinit(hashtable_t * ht) {
  for(pageid_t i = 0; i < ht->maxbucketid; i++) {
    pthread_mutex_destroy(&ht->buckets[i].mut);
  }
  free(ht->buckets);
  free(ht);
}
static inline pageid_t hashtable_func(hashtable_t *ht, pageid_t p) {
  return p & ht->maxbucketid;
}
typedef enum {
  LOOKUP,
  INSERT,
  TRYINSERT,
  REMOVE
} hashtable_mode;
static inline void * hashtable_op(hashtable_mode mode, hashtable_t *ht, pageid_t p, void *val) {
  pageid_t idx = hashtable_func(ht, p);
  void * ret;
  bucket_t *b1 = &ht->buckets[idx], *b2 = NULL;
  pthread_mutex_lock(&b1->mut); // start crabbing

  int num_incrs = 0;

  while(1) {
    // Loop invariants:
    // b1 is latched, b2 is unlatched
    assert(num_incrs < (ht->maxbucketid/4));
    num_incrs++;
    if(b1->key == p) { ret = b1->val; break; }
    if(b1->val == NULL) { ret = NULL; break; }
    idx = hashtable_func(ht, idx+1);
    b2 = b1;
    b1 = &ht->buckets[idx];
    pthread_mutex_lock(&b1->mut);
    pthread_mutex_unlock(&b2->mut);
  }
  if(mode == INSERT || (mode == TRYINSERT && ret == NULL)) {
    b1->key = p;
    b1->val = val;
  } else if(mode == REMOVE && ret != NULL) {
    pageid_t idx2 = idx;
    idx = hashtable_func(ht, idx+1);
    b2 = b1;
    b1 = &ht->buckets[idx];
    pthread_mutex_lock(&b1->mut);
    while(1) {
      // Loop invariants: b2 needs to be overwritten.
      //                  b1 and b2 are latched
      //                  b1 is the next bucket to consider for copying into b2.

      // What to do with b1?
      // Case 1: It is null, we win.
      pageid_t newidx = hashtable_func(ht, b1->key);
      if(b1->val == NULL) {
//        printf("d\n"); fflush(0);
        b2->key = 0;
        b2->val = NULL;
        break;
      // Case 2: b1 belongs "after" b2
      //   Subcase 1: newidx is higher than idx2, so newidx should stay where it is.
      //   Subcase 2: newidx wrapped, so it is less than idx2, but more than half way around the ring.
      } else if(idx2 < newidx || (idx2 > newidx + (ht->maxbucketid/2))) {
        // skip this b1.
//        printf("s\n"); fflush(0);
        idx = hashtable_func(ht, idx+1);
        pthread_mutex_unlock(&b1->mut);
        b1 = &ht->buckets[idx];
        pthread_mutex_lock(&b1->mut);
      // Case 3: we can compact b1 into b2's slot.
      } else {
//        printf("c %lld %lld %lld %lld\n", startidx, idx2, newidx, ht->maxbucketid); fflush(0);
        b2->key = b1->key;
        b2->val = b1->val;
        pthread_mutex_unlock(&b2->mut);
        // now we need to overwrite b1, so it is the new b2.
        idx2 = idx;
        idx = hashtable_func(ht, idx+1);
        b2 = b1;
        b1 = &ht->buckets[idx];
        pthread_mutex_lock(&b1->mut);
      }
    }
    pthread_mutex_unlock(&b2->mut);
  }
  pthread_mutex_unlock(&b1->mut); // stop crabbing
  return ret;
}
void * hashtable_insert(hashtable_t *ht, pageid_t p, void * val) {
  return hashtable_op(INSERT, ht, p, val);
}
void * hashtable_test_and_set(hashtable_t *ht, pageid_t p, void * val) {
  return hashtable_op(TRYINSERT, ht, p, val);
}
void * hashtable_lookup(hashtable_t *ht, pageid_t p) {
  return hashtable_op(LOOKUP, ht, p, NULL);
}
void * hashtable_remove(hashtable_t *ht, pageid_t p) {
  return hashtable_op(REMOVE, ht, p, NULL);
}
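Editor's note: hashtable_init() above rounds the requested capacity up to a power of two (strictly greater than the request), so hashtable_func() can reduce a key to a bucket index with a single mask (p & maxbucketid) instead of a modulo. A small standalone sketch of that rounding, with an illustrative request size:

#include <stdio.h>

/* Standalone sketch of the size-rounding loop in hashtable_init(): the
 * requested capacity is rounded up to a power of two so that bucket
 * selection can be a mask with maxbucketid rather than a modulo. */
int main(void) {
  unsigned long size = 10;           /* illustrative requested capacity */
  unsigned long newsize = 1;
  while(size) { size /= 2; newsize *= 2; }
  printf("buckets = %lu, maxbucketid = %lu\n", newsize, newsize - 1);
  /* prints: buckets = 16, maxbucketid = 15 */
  return 0;
}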
stasis/concurrentHash.h (Normal file, 24 lines)
@@ -0,0 +1,24 @@
/*
 * concurrentHash.h
 *
 *  Created on: Oct 15, 2009
 *      Author: sears
 */

#ifndef CONCURRENTHASH_H_
#define CONCURRENTHASH_H_
#include <stasis/common.h>

typedef struct hashtable_t hashtable_t;

hashtable_t * hashtable_init(pageid_t size, int tracknum);
void hashtable_deinit(hashtable_t * ht);
void * hashtable_insert(hashtable_t *ht, pageid_t p, void * val);
/** Atomically insert a value if the key was not already defined
 * @return NULL if val was inserted
 */
void * hashtable_test_and_set(hashtable_t *ht, pageid_t p, void * val);
void * hashtable_lookup(hashtable_t *ht, pageid_t p);
void * hashtable_remove(hashtable_t *ht, pageid_t p);

#endif /* CONCURRENTHASH_H_ */
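Editor's note: a minimal single-threaded usage sketch of the interface declared above, assuming the Stasis build environment; the return-value checks follow the semantics of hashtable_op() in concurrentHash.c, and error handling is omitted.

#include <stasis/concurrentHash.h>
#include <assert.h>
#include <stddef.h>

static int payload = 42;

void example(void) {
  hashtable_t *ht = hashtable_init(1000, 0);

  assert(hashtable_insert(ht, 7, &payload) == NULL);   /* no previous value for key 7 */
  assert(hashtable_lookup(ht, 7) == &payload);

  /* insert-if-absent: the key already exists, so the existing value is
     returned and nothing is overwritten */
  assert(hashtable_test_and_set(ht, 7, &payload) == &payload);

  assert(hashtable_remove(ht, 7) == &payload);
  assert(hashtable_lookup(ht, 7) == NULL);

  hashtable_deinit(ht);
}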
@@ -1,4 +1,5 @@
 SUBDIRS(fault_injection)
+CREATE_CHECK(check_concurrentHash)
 CREATE_CHECK(check_lhtable)
 CREATE_CHECK(check_logEntry)
 CREATE_CHECK(check_logWriter)
@@ -29,4 +30,4 @@ CREATE_CHECK(check_rangeTracker)
 CREATE_CHECK(check_replacementPolicy)
 CREATE_CHECK(check_lsmTree)
 CREATE_CHECK(check_groupBy)
-CREATE_CHECK(check_boundedLog)
+CREATE_CHECK(check_boundedLog)
test/stasis/check_concurrentHash.c (Normal file, 161 lines)
@@ -0,0 +1,161 @@
/*
 * check_concurrentHash.c
 *
 *  Created on: Oct 15, 2009
 *      Author: sears
 */
/*---
This software is copyrighted by the Regents of the University of
California, and other parties. The following terms apply to all files
associated with the software unless explicitly disclaimed in
individual files.

The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose,
provided that existing copyright notices are retained in all copies
and that this notice is included verbatim in any distributions. No
written agreement, license, or royalty fee is required for any of the
authorized uses. Modifications to this software may be copyrighted by
their authors and need not follow the licensing terms described here,
provided that the new terms are clearly indicated on the first page of
each file where they apply.

IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, AND
THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

GOVERNMENT USE: If you are acquiring this software on behalf of the
U.S. government, the Government shall have only "Restricted Rights" in
the software and related documentation as defined in the Federal
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you are
acquiring the software on behalf of the Department of Defense, the
software shall be classified as "Commercial Computer Software" and the
Government shall have only "Restricted Rights" as defined in Clause
252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
authors grant the U.S. Government and others acting in its behalf
permission to use and distribute the software in accordance with the
terms specified in this license.
---*/

#define _GNU_SOURCE
#include "../check_includes.h"

#include <stasis/concurrentHash.h>

#include <stdio.h>
#include <time.h>
#include <limits.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define LOG_NAME "check_lhtable.log"


#define NUM_OPS 100000000
#define NUM_ENTRIES 10000
#define NUM_THREADS 100
#define THREAD_ENTRIES ((NUM_ENTRIES/NUM_THREADS)-1)

hashtable_t * ht;

void * worker(void * arg) {
  int stride = *(int*) arg;

  pageid_t *data = malloc(sizeof(pageid_t) * THREAD_ENTRIES);

  for(int i = 1; i <= THREAD_ENTRIES; i++) {
    data[i-1] = -1 * (stride + (i * NUM_THREADS));
  }
  for(int j = 0; j < NUM_OPS / NUM_THREADS; j++) {
    int op = myrandom(2);

    int i = myrandom(THREAD_ENTRIES);

    pageid_t scratch = data[i];
    if(data[i] < 0) {
      scratch *= -1;
    }
    switch(op) {
    case 0: {
      void * ret;
      if(data[i] < 0) {
        ret = hashtable_insert(ht, scratch, &data[i]);
        assert(ret == NULL);
        data[i] *= -1;
      } else {
        ret = hashtable_remove(ht, scratch);
        assert(ret == &data[i]);
        data[i] *= -1;
      }
    } break;
    case 1: {
      void * ret = hashtable_lookup(ht, scratch);
      if(data[i] < 0) {
        assert(ret == NULL);
      } else {
        assert(ret == &data[i]);
      }
    } break;
    default:
      abort();
    }
  }
  free(data);
  return 0;
}

START_TEST(singleThreadHashTest) {
  ht = hashtable_init((pageid_t)((double)THREAD_ENTRIES * 1.1), 0);
  int i = 0;
  worker(&i);
  hashtable_deinit(ht);
} END_TEST

START_TEST(concurrentHashTest) {
  ht = hashtable_init((pageid_t)((double)NUM_ENTRIES * 1.1), 0);
  pthread_t workers[NUM_THREADS];
  for(int i = 0 ; i < NUM_THREADS; i++) {
    int * ip = malloc(sizeof(int));
    *ip = i;
    pthread_create(&workers[i], 0, worker, ip);
  }
  for(int i = 0 ; i < NUM_THREADS; i++) {
    pthread_join(workers[i], 0);
  }
  hashtable_deinit(ht);
} END_TEST

Suite * check_suite(void) {
  Suite *s = suite_create("lhtable");
  /* Begin a new test */
  TCase *tc = tcase_create("lhtable");

  tcase_set_timeout(tc, 0); // disable timeouts

  srandom(43);

  /* Sub tests are added, one per line, here */
  tcase_add_test(tc, singleThreadHashTest);
  tcase_add_test(tc, concurrentHashTest);

  /* --------------------------------------------- */

  tcase_add_checked_fixture(tc, setup, teardown);


  suite_add_tcase(s, tc);
  return s;
}

#include "../check_setup.h"
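Editor's note: the stress test above is race-free by construction. Each thread only touches keys of the form stride + i*NUM_THREADS, and the sign of data[i] records whether that key is currently in the table. A standalone sketch of the key-ownership scheme, with illustrative numbers:

#include <stdio.h>

/* Sketch of the key layout used by worker() above: each thread owns a
 * disjoint arithmetic progression of keys, so inserts and removes never
 * collide across threads, and each slot's sign can track "currently
 * inserted" without extra synchronization. */
#define NUM_THREADS 100

int main(void) {
  int stride = 3;                      /* illustrative thread id */
  for(int i = 1; i <= 3; i++) {
    printf("thread %d owns key %d\n", stride, stride + i * NUM_THREADS);
  }
  /* prints keys 103, 203, 303: no overlap with any other thread's keys */
  return 0;
}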