From: jdybnis Date: Mon, 15 Dec 2008 03:06:46 +0000 (+0000) Subject: typedefs for map keys and values X-Git-Url: https://pd.if.org/git/?p=nbds;a=commitdiff_plain;h=a19bce63ef088ad03004bc8e9bfde4901d978151 typedefs for map keys and values --- diff --git a/include/hashtable.h b/include/hashtable.h index 0a9ea20..880f2a4 100644 --- a/include/hashtable.h +++ b/include/hashtable.h @@ -7,15 +7,15 @@ typedef struct ht hashtable_t; typedef struct ht_iter ht_iter_t; hashtable_t * ht_alloc (const datatype_t *key_type); -uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t val); -uint64_t ht_get (hashtable_t *ht, void *key); -uint64_t ht_remove (hashtable_t *ht, void *key); +uint64_t ht_cas (hashtable_t *ht, map_key_t key, uint64_t expected_val, uint64_t val); +uint64_t ht_get (hashtable_t *ht, map_key_t key); +uint64_t ht_remove (hashtable_t *ht, map_key_t key); uint64_t ht_count (hashtable_t *ht); void ht_print (hashtable_t *ht); void ht_free (hashtable_t *ht); -ht_iter_t * ht_iter_begin (hashtable_t *ht, void *key); -uint64_t ht_iter_next (ht_iter_t *iter, void **key_ptr); +ht_iter_t * ht_iter_begin (hashtable_t *ht, map_key_t key); +uint64_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr); void ht_iter_free (ht_iter_t *iter); static const map_impl_t ht_map_impl = { diff --git a/include/list.h b/include/list.h index 69e6814..7eb815e 100644 --- a/include/list.h +++ b/include/list.h @@ -6,17 +6,17 @@ typedef struct ll list_t; typedef struct ll_iter ll_iter_t; -list_t * ll_alloc (const datatype_t *key_type); -uint64_t ll_cas (list_t *ll, void *key, uint64_t expected_val, uint64_t new_val); -uint64_t ll_lookup (list_t *ll, void *key); -uint64_t ll_remove (list_t *ll, void *key); -uint64_t ll_count (list_t *ll); -void ll_print (list_t *ll); -void ll_free (list_t *ll); -void * ll_min_key (list_t *sl); +list_t * ll_alloc (const datatype_t *key_type); +map_val_t ll_cas (list_t *ll, map_key_t key, map_val_t expected_val, map_val_t new_val); +map_val_t 
ll_lookup (list_t *ll, map_key_t key); +map_val_t ll_remove (list_t *ll, map_key_t key); +map_val_t ll_count (list_t *ll); +void ll_print (list_t *ll); +void ll_free (list_t *ll); +map_key_t ll_min_key (list_t *sl); -ll_iter_t * ll_iter_begin (list_t *ll, void *key); -uint64_t ll_iter_next (ll_iter_t *iter, void **key_ptr); +ll_iter_t * ll_iter_begin (list_t *ll, map_key_t key); +map_val_t ll_iter_next (ll_iter_t *iter, map_key_t *key_ptr); void ll_iter_free (ll_iter_t *iter); static const map_impl_t ll_map_impl = { diff --git a/include/map.h b/include/map.h index 27e8dae..33092d3 100644 --- a/include/map.h +++ b/include/map.h @@ -7,19 +7,22 @@ typedef struct map map_t; typedef struct map_iter map_iter_t; typedef struct map_impl map_impl_t; -map_t * map_alloc (const map_impl_t *map_impl, const datatype_t *key_type); -uint64_t map_get (map_t *map, void *key); -uint64_t map_set (map_t *map, void *key, uint64_t new_val); -uint64_t map_add (map_t *map, void *key, uint64_t new_val); -uint64_t map_cas (map_t *map, void *key, uint64_t expected_val, uint64_t new_val); -uint64_t map_replace (map_t *map, void *key, uint64_t new_val); -uint64_t map_remove (map_t *map, void *key); -uint64_t map_count (map_t *map); -void map_print (map_t *map); -void map_free (map_t *map); - -map_iter_t * map_iter_begin (map_t *map, void *key); -uint64_t map_iter_next (map_iter_t *iter, void **key); +typedef void * map_key_t; +typedef uint64_t map_val_t; + +map_t * map_alloc (const map_impl_t *map_impl, const datatype_t *key_type); +map_val_t map_get (map_t *map, map_key_t key); +map_val_t map_set (map_t *map, map_key_t key, map_val_t new_val); +map_val_t map_add (map_t *map, map_key_t key, map_val_t new_val); +map_val_t map_cas (map_t *map, map_key_t key, map_val_t expected_val, map_val_t new_val); +map_val_t map_replace (map_t *map, map_key_t key, map_val_t new_val); +map_val_t map_remove (map_t *map, map_key_t key); +map_val_t map_count (map_t *map); +void map_print (map_t *map); +void 
map_free (map_t *map); + +map_iter_t * map_iter_begin (map_t *map, map_key_t key); +map_val_t map_iter_next (map_iter_t *iter, map_key_t *key); void map_iter_free (map_iter_t *iter); ///////////////////////////////////////////////////////////////////////////////////// @@ -28,16 +31,16 @@ void map_iter_free (map_iter_t *iter); #define CAS_EXPECT_EXISTS (-1) #define CAS_EXPECT_WHATEVER (-2) -typedef void * (*map_alloc_t) (const datatype_t *); -typedef uint64_t (*map_cas_t) (map_impl_t *, void *, uint64_t, uint64_t); -typedef uint64_t (*map_get_t) (map_impl_t *, void *); -typedef uint64_t (*map_remove_t) (map_impl_t *, void *); -typedef uint64_t (*map_count_t) (map_impl_t *); -typedef void (*map_print_t) (map_impl_t *); -typedef void (*map_free_t) (map_impl_t *); +typedef void * (*map_alloc_t) (const datatype_t *); +typedef map_val_t (*map_cas_t) (map_impl_t *, map_key_t , map_val_t, map_val_t); +typedef map_val_t (*map_get_t) (map_impl_t *, map_key_t ); +typedef map_val_t (*map_remove_t) (map_impl_t *, map_key_t ); +typedef map_val_t (*map_count_t) (map_impl_t *); +typedef void (*map_print_t) (map_impl_t *); +typedef void (*map_free_t) (map_impl_t *); -typedef map_iter_t * (*map_iter_begin_t) (map_impl_t *, void *); -typedef uint64_t (*map_iter_next_t) (map_iter_t *, void **); +typedef map_iter_t * (*map_iter_begin_t) (map_impl_t *, map_key_t ); +typedef map_val_t (*map_iter_next_t) (map_iter_t *, map_key_t *); typedef void (*map_iter_free_t) (map_iter_t *); struct map_impl { diff --git a/include/skiplist.h b/include/skiplist.h index 8df577a..5c707fc 100644 --- a/include/skiplist.h +++ b/include/skiplist.h @@ -7,16 +7,16 @@ typedef struct sl skiplist_t; typedef struct sl_iter sl_iter_t; skiplist_t * sl_alloc (const datatype_t *key_type); -uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expected_val, uint64_t new_val); -uint64_t sl_lookup (skiplist_t *sl, void *key); -uint64_t sl_remove (skiplist_t *sl, void *key); -uint64_t sl_count (skiplist_t *sl); -void 
sl_print (skiplist_t *sl); -void sl_free (skiplist_t *sl); -void * sl_min_key (skiplist_t *sl); +map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expected_val, map_val_t new_val); +map_val_t sl_lookup (skiplist_t *sl, map_key_t key); +map_val_t sl_remove (skiplist_t *sl, map_key_t key); +map_val_t sl_count (skiplist_t *sl); +void sl_print (skiplist_t *sl); +void sl_free (skiplist_t *sl); +map_key_t sl_min_key (skiplist_t *sl); -sl_iter_t * sl_iter_begin (skiplist_t *sl, void *key); -uint64_t sl_iter_next (sl_iter_t *iter, void **key_ptr); +sl_iter_t * sl_iter_begin (skiplist_t *sl, map_key_t key); +map_val_t sl_iter_next (sl_iter_t *iter, map_key_t *key_ptr); void sl_iter_free (sl_iter_t *iter); static const map_impl_t sl_map_impl = { diff --git a/map/hashtable.c b/map/hashtable.c index a853b4c..64f7a47 100644 --- a/map/hashtable.c +++ b/map/hashtable.c @@ -8,8 +8,8 @@ * * Note: This is code uses synchronous atomic operations because that is all that x86 provides. * Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies - * the code a bit, but it won't be as fast as it could be on platforms like SPARC that provide - * weaker operations which would still do the job. + * the code a bit, but it won't be as fast as it could be on platforms that provide weaker + * operations like an unfenced CAS which would still do the job. 
*/ #include @@ -21,8 +21,8 @@ #define GET_PTR(x) ((void *)((x) & MASK(48))) // low-order 48 bits is a pointer to a nstring_t typedef struct entry { - uint64_t key; - uint64_t val; + uint64_t key; + map_val_t val; } entry_t; typedef struct hti { @@ -40,8 +40,6 @@ typedef struct hti { struct ht_iter { hti_t * hti; int64_t idx; - uint64_t key; - uint64_t val; }; struct ht { @@ -49,8 +47,8 @@ struct ht { const datatype_t *key_type; }; -static const uint64_t COPIED_VALUE = -1; -static const uint64_t TOMBSTONE = STRIP_TAG(-1, TAG1); +static const map_val_t COPIED_VALUE = -1; +static const map_val_t TOMBSTONE = STRIP_TAG(-1, TAG1); static const unsigned ENTRIES_PER_BUCKET = CACHE_LINE_SIZE/sizeof(entry_t); static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2; @@ -74,7 +72,7 @@ static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) { // // Record if the entry being returned is empty. Otherwise the caller will have to waste time // re-comparing the keys to confirm that it did not lose a race to fill an empty entry. -static volatile entry_t *hti_lookup (hti_t *hti, void *key, uint32_t key_hash, int *is_empty) { +static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_hash, int *is_empty) { TRACE("h2", "hti_lookup(key %p in hti %p)", key, hti); *is_empty = 0; @@ -186,7 +184,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale)); assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48)); - uint64_t ht1_ent_val = ht1_ent->val; + map_val_t ht1_ent_val = ht1_ent->val; if (EXPECT_FALSE(ht1_ent_val == COPIED_VALUE)) { TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2); return FALSE; // already copied @@ -194,7 +192,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h // Kill empty entries. 
if (EXPECT_FALSE(ht1_ent_val == DOES_NOT_EXIST)) { - uint64_t ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE); + map_val_t ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE); if (ht1_ent_val == DOES_NOT_EXIST) { TRACE("h1", "hti_copy_entry: empty entry %p killed", ht1_ent, 0); return TRUE; @@ -216,7 +214,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h // Install the key in the new table. uint64_t ht1_ent_key = ht1_ent->key; - void *key = (ht1->ht->key_type == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key); + map_key_t key = (ht1->ht->key_type == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key); // The old table's dead entries don't need to be copied to the new table, but their keys need to be freed. assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE, TAG1)); @@ -258,7 +256,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h // Copy the value to the entry in the new table. ht1_ent_val = STRIP_TAG(ht1_ent_val, TAG1); - uint64_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val); + map_val_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val); // If there is a nested copy in progress, we might have installed the key into a dead entry. if (old_ht2_ent_val == COPIED_VALUE) { @@ -296,7 +294,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h // real value matches (i.ent. not a TOMBSTONE or DOES_NOT_EXIST) as long as is in the table. If // is CAS_EXPECT_WHATEVER then skip the test entirely. 
// -static uint64_t hti_cas (hti_t *hti, void *key, uint32_t key_hash, uint64_t expected, uint64_t new) { +static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_t expected, map_val_t new) { TRACE("h1", "hti_cas: hti %p key %p", hti, key); TRACE("h1", "hti_cas: value %p expect %p", new, expected); assert(hti); @@ -351,7 +349,7 @@ static uint64_t hti_cas (hti_t *hti, void *key, uint32_t key_hash, uint64_t expe (hti->ht->key_type == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent); // If the entry is in the middle of a copy, the copy must be completed first. - uint64_t ent_val = ent->val; + map_val_t ent_val = ent->val; if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) { if (ent_val != COPIED_VALUE) { int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next); @@ -382,7 +380,7 @@ static uint64_t hti_cas (hti_t *hti, void *key, uint32_t key_hash, uint64_t expe } // CAS the value into the entry. Retry if it fails. - uint64_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new); + map_val_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new); if (EXPECT_FALSE(v != ent_val)) { TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", ent_val, v); return hti_cas(hti, key, key_hash, expected, new); // recursive tail-call @@ -401,7 +399,7 @@ static uint64_t hti_cas (hti_t *hti, void *key, uint32_t key_hash, uint64_t expe } // -static uint64_t hti_get (hti_t *hti, void *key, uint32_t key_hash) { +static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) { int is_empty; volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty); @@ -418,7 +416,7 @@ static uint64_t hti_get (hti_t *hti, void *key, uint32_t key_hash) { return DOES_NOT_EXIST; // If the entry is being copied, finish the copy and retry on the next table. 
- uint64_t ent_val = ent->val; + map_val_t ent_val = ent->val; if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) { if (EXPECT_FALSE(ent_val != COPIED_VALUE)) { int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next); @@ -433,7 +431,7 @@ static uint64_t hti_get (hti_t *hti, void *key, uint32_t key_hash) { } // -uint64_t ht_get (hashtable_t *ht, void *key) { +map_val_t ht_get (hashtable_t *ht, map_key_t key) { uint32_t hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key); return hti_get(ht->hti, key, hash); } @@ -443,8 +441,8 @@ int hti_help_copy (hti_t *hti) { volatile entry_t *ent; uint64_t limit; uint64_t total_copied = hti->num_entries_copied; - int num_copied = 0; - int x = hti->copy_scan; + uint64_t num_copied = 0; + uint64_t x = hti->copy_scan; TRACE("h1", "ht_cas: help copy. scan is %llu, size is %llu", x, 1<scale); if (total_copied != (1 << hti->scale)) { @@ -482,7 +480,7 @@ int hti_help_copy (hti_t *hti) { } // -uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t new_val) { +map_val_t ht_cas (hashtable_t *ht, map_key_t key, map_val_t expected_val, map_val_t new_val) { TRACE("h2", "ht_cas: key %p ht %p", key, ht); TRACE("h2", "ht_cas: expected val %p new val %p", expected_val, new_val); @@ -509,7 +507,7 @@ uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t new } } - uint64_t old_val; + map_val_t old_val; uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key); while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) { assert(hti->next); @@ -521,9 +519,9 @@ uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t new // Remove the value in associated with . Returns the value removed, or DOES_NOT_EXIST if there was // no value for that key. 
-uint64_t ht_remove (hashtable_t *ht, void *key) { +map_val_t ht_remove (hashtable_t *ht, map_key_t key) { hti_t *hti = ht->hti; - uint64_t val; + map_val_t val; uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key); do { val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST); @@ -588,7 +586,7 @@ void ht_print (hashtable_t *ht) { } } -ht_iter_t *ht_iter_begin (hashtable_t *ht, void *key) { +ht_iter_t *ht_iter_begin (hashtable_t *ht, map_key_t key) { assert(key == NULL); hti_t *hti = ht->hti; int rcount; @@ -614,10 +612,10 @@ ht_iter_t *ht_iter_begin (hashtable_t *ht, void *key) { return iter; } -uint64_t ht_iter_next (ht_iter_t *iter, void **key_ptr) { +map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) { volatile entry_t *ent; - uint64_t key; - uint64_t val; + map_key_t key; + map_val_t val; uint64_t table_size = (1 << iter->hti->scale); do { iter->idx++; @@ -625,19 +623,19 @@ uint64_t ht_iter_next (ht_iter_t *iter, void **key_ptr) { return DOES_NOT_EXIST; } ent = &iter->hti->table[iter->idx]; - key = ent->key; + key = (iter->hti->ht->key_type == NULL) ? (map_key_t)ent->key : GET_PTR(ent->key); val = ent->val; } while (key == DOES_NOT_EXIST || val == DOES_NOT_EXIST || val == TOMBSTONE); if (key_ptr) { - *key_ptr = (void *)key; + *key_ptr = key; } if (val == COPIED_VALUE) { uint32_t hash = (iter->hti->ht->key_type == NULL) - ? murmur32_8b(key) - : iter->hti->ht->key_type->hash((void *)key); - val = hti_get(iter->hti->next, (void *)ent->key, hash); + ? 
murmur32_8b((uint64_t)key) + : iter->hti->ht->key_type->hash(key); + val = hti_get(iter->hti->next, (map_key_t)ent->key, hash); } return val; diff --git a/map/list.c b/map/list.c index f2e9c82..b9df598 100644 --- a/map/list.c +++ b/map/list.c @@ -16,8 +16,8 @@ typedef struct ll_iter node_t; struct ll_iter { - void *key; - uint64_t val; + map_key_t key; + map_val_t val; node_t *next; }; @@ -26,7 +26,7 @@ struct ll { const datatype_t *key_type; }; -static node_t *node_alloc (void *key, uint64_t val) { +static node_t *node_alloc (map_key_t key, map_val_t val) { node_t *item = (node_t *)nbd_malloc(sizeof(node_t)); item->key = key; item->val = val; @@ -62,7 +62,7 @@ uint64_t ll_count (list_t *ll) { return count; } -static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, void *key, int help_remove) { +static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, map_key_t key, int help_remove) { node_t *pred = ll->head; node_t *item = pred->next; TRACE("l2", "find_pred: searching for key %p in list (head is %p)", key, pred); @@ -152,14 +152,14 @@ static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, void *ke } // Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor. -uint64_t ll_lookup (list_t *ll, void *key) { +map_val_t ll_lookup (list_t *ll, map_key_t key) { TRACE("l1", "ll_lookup: searching for key %p in list %p", key, ll); node_t *item; int found = find_pred(NULL, &item, ll, key, FALSE); // If we found an matching the key return its value. if (found) { - uint64_t val = item->val; + map_val_t val = item->val; if (val != DOES_NOT_EXIST) { TRACE("l1", "ll_lookup: found item %p. val %p. 
returning item", item, item->val); return val; @@ -170,7 +170,7 @@ uint64_t ll_lookup (list_t *ll, void *key) { return DOES_NOT_EXIST; } -uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) { +map_val_t ll_cas (list_t *ll, map_key_t key, map_val_t expectation, map_val_t new_val) { TRACE("l1", "ll_cas: key %p list %p", key, ll); TRACE("l1", "ll_cas: expectation %p new value %p", expectation, new_val); ASSERT((int64_t)new_val > 0); @@ -190,7 +190,7 @@ uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) // Create a new item and insert it into the list. TRACE("l2", "ll_cas: attempting to insert item between %p and %p", pred, pred->next); - void *new_key = (ll->key_type == NULL) ? key : ll->key_type->clone(key); + map_key_t new_key = (ll->key_type == NULL) ? key : ll->key_type->clone(key); node_t *new_item = node_alloc(new_key, new_val); node_t *next = new_item->next = old_item; node_t *other = SYNC_CAS(&pred->next, next, new_item); @@ -209,7 +209,7 @@ uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) } // Found an item in the list that matches the key. - uint64_t old_item_val = old_item->val; + map_val_t old_item_val = old_item->val; do { // If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us. if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) { @@ -227,7 +227,7 @@ uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) // replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it // succeeded and return our value even though we indicated that the node has been removed. If the CAS // fails it means another thread either removed the node or updated its value. - uint64_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val); + map_val_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val); if (ret_val == old_item_val) { TRACE("l1", "ll_cas: the CAS succeeded. 
updated the value of the item", 0, 0); return ret_val; // success @@ -239,7 +239,7 @@ uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) } while (1); } -uint64_t ll_remove (list_t *ll, void *key) { +map_val_t ll_remove (list_t *ll, map_key_t key) { TRACE("l1", "ll_remove: removing item with key %p from list %p", key, ll); node_t *pred; node_t *item; @@ -265,7 +265,7 @@ uint64_t ll_remove (list_t *ll, void *key) { // Atomically swap out the item's value in case another thread is updating the item while we are // removing it. This establishes which operation occurs first logically, the update or the remove. - uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST); + map_val_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST); TRACE("l2", "ll_remove: replaced item's val %p with DOES_NOT_EXIT", val, 0); // Unlink from . If we lose a race to another thread just back off. It is safe to leave the @@ -307,13 +307,13 @@ void ll_print (list_t *ll) { printf("\n"); } -ll_iter_t *ll_iter_begin (list_t *ll, void *key) { +ll_iter_t *ll_iter_begin (list_t *ll, map_key_t key) { node_t *iter = node_alloc(0,0); find_pred(NULL, &iter->next, ll, key, FALSE); return iter; } -uint64_t ll_iter_next (ll_iter_t *iter, void **key_ptr) { +map_val_t ll_iter_next (ll_iter_t *iter, map_key_t *key_ptr) { assert(iter); node_t *item = iter->next; while (item != NULL && IS_TAGGED(item->next, TAG1)) { diff --git a/map/map.c b/map/map.c index fcd9f83..176b79a 100644 --- a/map/map.c +++ b/map/map.c @@ -34,42 +34,42 @@ void map_print (map_t *map) { map->impl->print(map->data); } -uint64_t map_count (map_t *map) { +map_val_t map_count (map_t *map) { return map->impl->count(map->data); } -uint64_t map_get (map_t *map, void *key) { +map_val_t map_get (map_t *map, map_key_t key) { return map->impl->get(map->data, key); } -uint64_t map_set (map_t *map, void *key, uint64_t new_val) { +map_val_t map_set (map_t *map, map_key_t key, map_val_t new_val) { return map->impl->cas(map->data, key, 
CAS_EXPECT_WHATEVER, new_val); } -uint64_t map_add (map_t *map, void *key, uint64_t new_val) { +map_val_t map_add (map_t *map, map_key_t key, map_val_t new_val) { return map->impl->cas(map->data, key, CAS_EXPECT_DOES_NOT_EXIST, new_val); } -uint64_t map_cas (map_t *map, void *key, uint64_t expected_val, uint64_t new_val) { +map_val_t map_cas (map_t *map, map_key_t key, map_val_t expected_val, map_val_t new_val) { return map->impl->cas(map->data, key, expected_val, new_val); } -uint64_t map_replace(map_t *map, void *key, uint64_t new_val) { +map_val_t map_replace(map_t *map, map_key_t key, map_val_t new_val) { return map->impl->cas(map->data, key, CAS_EXPECT_EXISTS, new_val); } -uint64_t map_remove (map_t *map, void *key) { +map_val_t map_remove (map_t *map, map_key_t key) { return map->impl->remove(map->data, key); } -map_iter_t * map_iter_begin (map_t *map, void *key) { +map_iter_t * map_iter_begin (map_t *map, map_key_t key) { map_iter_t *iter = nbd_malloc(sizeof(map_iter_t)); iter->impl = map->impl; iter->state = map->impl->iter_begin(map->data, key); return iter; } -uint64_t map_iter_next (map_iter_t *iter, void **key_ptr) { +map_val_t map_iter_next (map_iter_t *iter, map_key_t *key_ptr) { return iter->impl->iter_next(iter->state, key_ptr); } diff --git a/map/skiplist.c b/map/skiplist.c index 1f608b4..afdb1f7 100644 --- a/map/skiplist.c +++ b/map/skiplist.c @@ -31,8 +31,8 @@ typedef struct sl_iter node_t; struct sl_iter { - void *key; - uint64_t val; + map_key_t key; + map_val_t val; int top_level; node_t *next[]; }; @@ -54,7 +54,7 @@ static int random_level (void) { return n; } -static node_t *node_alloc (int level, void *key, uint64_t val) { +static node_t *node_alloc (int level, map_key_t key, map_val_t val) { assert(level >= 0 && level <= MAX_LEVEL); size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *); node_t *item = (node_t *)nbd_malloc(sz); @@ -94,7 +94,7 @@ uint64_t sl_count (skiplist_t *sl) { return count; } -static node_t *find_preds (node_t 
**preds, node_t **succs, int n, skiplist_t *sl, void *key, int help_remove) { +static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, map_key_t key, int help_remove) { node_t *pred = sl->head; node_t *item = NULL; TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred); @@ -216,13 +216,13 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl } // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors. -uint64_t sl_lookup (skiplist_t *sl, void *key) { +map_val_t sl_lookup (skiplist_t *sl, map_key_t key) { TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl); node_t *item = find_preds(NULL, NULL, 0, sl, key, FALSE); // If we found an matching the return its value. if (item != NULL) { - uint64_t val = item->val; + map_val_t val = item->val; if (val != DOES_NOT_EXIST) { TRACE("s1", "sl_lookup: found item %p. val %p. returning item", item, item->val); return val; @@ -233,7 +233,7 @@ uint64_t sl_lookup (skiplist_t *sl, void *key) { return DOES_NOT_EXIST; } -void *sl_min_key (skiplist_t *sl) { +map_key_t sl_min_key (skiplist_t *sl) { node_t *item = sl->head->next[0]; while (item != NULL) { node_t *next = item->next[0]; @@ -244,7 +244,7 @@ void *sl_min_key (skiplist_t *sl) { return DOES_NOT_EXIST; } -uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_val) { +map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expectation, map_val_t new_val) { TRACE("s1", "sl_cas: key %p skiplist %p", key, sl); TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val); ASSERT((int64_t)new_val > 0); @@ -267,7 +267,7 @@ uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_v // First insert into the bottom level. TRACE("s3", "sl_cas: attempting to insert item between %p and %p", preds[0], nexts[0]); - void *new_key = (sl->key_type == NULL) ? 
key : sl->key_type->clone(key); + map_key_t new_key = (sl->key_type == NULL) ? key : sl->key_type->clone(key); new_item = node_alloc(n, new_key, new_val); node_t *pred = preds[0]; node_t *next = new_item->next[0] = nexts[0]; @@ -288,7 +288,7 @@ uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_v } // Found an item in the skiplist that matches the key. - uint64_t old_item_val = old_item->val; + map_val_t old_item_val = old_item->val; do { // If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us. if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) { @@ -306,7 +306,7 @@ uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_v // replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it // succeeded and return our value even though we indicated that the node has been removed. If the CAS // fails it means another thread either removed the node or updated its value. - uint64_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val); + map_val_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val); if (ret_val == old_item_val) { TRACE("s1", "sl_cas: the CAS succeeded. updated the value of the item", 0, 0); return ret_val; // success @@ -349,7 +349,7 @@ uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_v return DOES_NOT_EXIST; // success } -uint64_t sl_remove (skiplist_t *sl, void *key) { +map_val_t sl_remove (skiplist_t *sl, map_key_t key) { TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key, sl); node_t *preds[MAX_LEVEL+1]; node_t *item = find_preds(preds, NULL, -1, sl, key, TRUE); @@ -413,7 +413,7 @@ uint64_t sl_remove (skiplist_t *sl, void *key) { // Atomically swap out the item's value in case another thread is updating the item while we are // removing it. This establishes which operation occurs first logically, the update or the remove. 
- uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST); + map_val_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST); TRACE("s2", "sl_remove: replaced item %p's value with DOES_NOT_EXIT", item, 0); node_t *pred = preds[0]; @@ -475,13 +475,13 @@ void sl_print (skiplist_t *sl) { } } -sl_iter_t *sl_iter_begin (skiplist_t *sl, void *key) { +sl_iter_t *sl_iter_begin (skiplist_t *sl, map_key_t key) { node_t *iter = node_alloc(0, 0, 0); find_preds(NULL, &iter->next[0], 0, sl, key, FALSE); return iter; } -uint64_t sl_iter_next (sl_iter_t *iter, void **key_ptr) { +map_val_t sl_iter_next (sl_iter_t *iter, map_key_t *key_ptr) { assert(iter); node_t *item = iter->next[0]; while (item != NULL && IS_TAGGED(item->next[0], TAG1)) { diff --git a/test/map_test2.c b/test/map_test2.c index 1ba4e42..bb06971 100644 --- a/test/map_test2.c +++ b/test/map_test2.c @@ -1,6 +1,9 @@ /* * Written by Josh Dybnis and released to the public domain, as explained at * http://creativecommons.org/licenses/publicdomain + * + * tests ported from high-scale-lib + * http://sourceforge.net/projects/high-scale-lib */ #include #include diff --git a/todo b/todo index 807a31a..da53a45 100644 --- a/todo +++ b/todo @@ -7,20 +7,33 @@ + optimize integer keys + ht_print() + iterators + +memory management +------------------ - make rcu yield when its buffer gets full instead of throwing an assert - alternate memory reclamation schemes, hazard pointers and/or reference count -- investigate 16 byte CAS; ht can store GUIDs inline instead of pointers to actual keys -- document usage -- document algorithms -- port tests from lib-high-scale -- 32 bit version of hashtable -- verify list and skiplist work on 32 bit platforms +- verify key management in list, skiplist, and hashtable + +quality +-------- - transaction tests -- validate the arguments to interface functions +- port perf tests from lib-high-scale +- characterize the performance of hashtable, list and skiplist +- validate arguments in interface functions +- document 
usage of the library +- document algorithms + +optimization +------------ +- investigate 16 byte CAS; ht can store GUIDs inline instead of pointers to actual keys +- shortcut from write-set to entries/nodes +- use a shared scan for write-set validation, similar to ht copy logic -- characterize the performance of hashtable, list and skiplist - experiment with the performance impact of not passing the hash between functions in ht - experiment with embedding the keys in the list/skiplist nodes + +features +-------- +- 32 bit version of hashtable +- verify list and skiplist work on 32 bit platforms - allow values of 0 to be inserted into maps (change DOES_NOT_EXIST to something else) -- see if it's possible to rename nbd_malloc to malloc +- separate nbd_malloc/nbd_free into general purpose malloc/free replacement