typedef struct ht_iter ht_iter_t;
hashtable_t * ht_alloc (const datatype_t *key_type);
-uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t val);
-uint64_t ht_get (hashtable_t *ht, void *key);
-uint64_t ht_remove (hashtable_t *ht, void *key);
+uint64_t ht_cas (hashtable_t *ht, map_key_t key, uint64_t expected_val, uint64_t val);
+uint64_t ht_get (hashtable_t *ht, map_key_t key);
+uint64_t ht_remove (hashtable_t *ht, map_key_t key);
uint64_t ht_count (hashtable_t *ht);
void ht_print (hashtable_t *ht);
void ht_free (hashtable_t *ht);
-ht_iter_t * ht_iter_begin (hashtable_t *ht, void *key);
-uint64_t ht_iter_next (ht_iter_t *iter, void **key_ptr);
+ht_iter_t * ht_iter_begin (hashtable_t *ht, map_key_t key);
+uint64_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr);
void ht_iter_free (ht_iter_t *iter);
static const map_impl_t ht_map_impl = {
typedef struct ll list_t;
typedef struct ll_iter ll_iter_t;
-list_t * ll_alloc (const datatype_t *key_type);
-uint64_t ll_cas (list_t *ll, void *key, uint64_t expected_val, uint64_t new_val);
-uint64_t ll_lookup (list_t *ll, void *key);
-uint64_t ll_remove (list_t *ll, void *key);
-uint64_t ll_count (list_t *ll);
-void ll_print (list_t *ll);
-void ll_free (list_t *ll);
-void * ll_min_key (list_t *sl);
+list_t * ll_alloc (const datatype_t *key_type);
+map_val_t ll_cas (list_t *ll, map_key_t key, map_val_t expected_val, map_val_t new_val);
+map_val_t ll_lookup (list_t *ll, map_key_t key);
+map_val_t ll_remove (list_t *ll, map_key_t key);
+map_val_t ll_count (list_t *ll);
+void ll_print (list_t *ll);
+void ll_free (list_t *ll);
+map_key_t ll_min_key (list_t *sl);
-ll_iter_t * ll_iter_begin (list_t *ll, void *key);
-uint64_t ll_iter_next (ll_iter_t *iter, void **key_ptr);
+ll_iter_t * ll_iter_begin (list_t *ll, map_key_t key);
+map_val_t ll_iter_next (ll_iter_t *iter, map_key_t *key_ptr);
void ll_iter_free (ll_iter_t *iter);
static const map_impl_t ll_map_impl = {
typedef struct map_iter map_iter_t;
typedef struct map_impl map_impl_t;
-map_t * map_alloc (const map_impl_t *map_impl, const datatype_t *key_type);
-uint64_t map_get (map_t *map, void *key);
-uint64_t map_set (map_t *map, void *key, uint64_t new_val);
-uint64_t map_add (map_t *map, void *key, uint64_t new_val);
-uint64_t map_cas (map_t *map, void *key, uint64_t expected_val, uint64_t new_val);
-uint64_t map_replace (map_t *map, void *key, uint64_t new_val);
-uint64_t map_remove (map_t *map, void *key);
-uint64_t map_count (map_t *map);
-void map_print (map_t *map);
-void map_free (map_t *map);
-
-map_iter_t * map_iter_begin (map_t *map, void *key);
-uint64_t map_iter_next (map_iter_t *iter, void **key);
+typedef void * map_key_t;
+typedef uint64_t map_val_t;
+
+map_t * map_alloc (const map_impl_t *map_impl, const datatype_t *key_type);
+map_val_t map_get (map_t *map, map_key_t key);
+map_val_t map_set (map_t *map, map_key_t key, map_val_t new_val);
+map_val_t map_add (map_t *map, map_key_t key, map_val_t new_val);
+map_val_t map_cas (map_t *map, map_key_t key, map_val_t expected_val, map_val_t new_val);
+map_val_t map_replace (map_t *map, map_key_t key, map_val_t new_val);
+map_val_t map_remove (map_t *map, map_key_t key);
+map_val_t map_count (map_t *map);
+void map_print (map_t *map);
+void map_free (map_t *map);
+
+map_iter_t * map_iter_begin (map_t *map, map_key_t key);
+map_val_t map_iter_next (map_iter_t *iter, map_key_t *key);
void map_iter_free (map_iter_t *iter);
/////////////////////////////////////////////////////////////////////////////////////
#define CAS_EXPECT_EXISTS (-1)
#define CAS_EXPECT_WHATEVER (-2)
-typedef void * (*map_alloc_t) (const datatype_t *);
-typedef uint64_t (*map_cas_t) (map_impl_t *, void *, uint64_t, uint64_t);
-typedef uint64_t (*map_get_t) (map_impl_t *, void *);
-typedef uint64_t (*map_remove_t) (map_impl_t *, void *);
-typedef uint64_t (*map_count_t) (map_impl_t *);
-typedef void (*map_print_t) (map_impl_t *);
-typedef void (*map_free_t) (map_impl_t *);
+typedef void * (*map_alloc_t) (const datatype_t *);
+typedef map_val_t (*map_cas_t) (map_impl_t *, map_key_t , map_val_t, map_val_t);
+typedef map_val_t (*map_get_t) (map_impl_t *, map_key_t );
+typedef map_val_t (*map_remove_t) (map_impl_t *, map_key_t );
+typedef map_val_t (*map_count_t) (map_impl_t *);
+typedef void (*map_print_t) (map_impl_t *);
+typedef void (*map_free_t) (map_impl_t *);
-typedef map_iter_t * (*map_iter_begin_t) (map_impl_t *, void *);
-typedef uint64_t (*map_iter_next_t) (map_iter_t *, void **);
+typedef map_iter_t * (*map_iter_begin_t) (map_impl_t *, map_key_t );
+typedef map_val_t (*map_iter_next_t) (map_iter_t *, map_key_t *);
typedef void (*map_iter_free_t) (map_iter_t *);
struct map_impl {
typedef struct sl_iter sl_iter_t;
skiplist_t * sl_alloc (const datatype_t *key_type);
-uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expected_val, uint64_t new_val);
-uint64_t sl_lookup (skiplist_t *sl, void *key);
-uint64_t sl_remove (skiplist_t *sl, void *key);
-uint64_t sl_count (skiplist_t *sl);
-void sl_print (skiplist_t *sl);
-void sl_free (skiplist_t *sl);
-void * sl_min_key (skiplist_t *sl);
+map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expected_val, map_val_t new_val);
+map_val_t sl_lookup (skiplist_t *sl, map_key_t key);
+map_val_t sl_remove (skiplist_t *sl, map_key_t key);
+map_val_t sl_count (skiplist_t *sl);
+void sl_print (skiplist_t *sl);
+void sl_free (skiplist_t *sl);
+map_key_t sl_min_key (skiplist_t *sl);
-sl_iter_t * sl_iter_begin (skiplist_t *sl, void *key);
-uint64_t sl_iter_next (sl_iter_t *iter, void **key_ptr);
+sl_iter_t * sl_iter_begin (skiplist_t *sl, map_key_t key);
+map_val_t sl_iter_next (sl_iter_t *iter, map_key_t *key_ptr);
void sl_iter_free (sl_iter_t *iter);
static const map_impl_t sl_map_impl = {
*
 * Note: This code uses synchronous atomic operations because that is all that x86 provides.
* Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
- * the code a bit, but it won't be as fast as it could be on platforms like SPARC that provide
- * weaker operations which would still do the job.
+ * the code a bit, but it won't be as fast as it could be on platforms that provide weaker
+ * operations like an unfenced CAS which would still do the job.
*/
#include <stdio.h>
#define GET_PTR(x) ((void *)((x) & MASK(48))) // low-order 48 bits is a pointer to a nstring_t
typedef struct entry {
- uint64_t key;
- uint64_t val;
+ uint64_t key;
+ map_val_t val;
} entry_t;
typedef struct hti {
struct ht_iter {
hti_t * hti;
int64_t idx;
- uint64_t key;
- uint64_t val;
};
struct ht {
const datatype_t *key_type;
};
-static const uint64_t COPIED_VALUE = -1;
-static const uint64_t TOMBSTONE = STRIP_TAG(-1, TAG1);
+static const map_val_t COPIED_VALUE = -1;
+static const map_val_t TOMBSTONE = STRIP_TAG(-1, TAG1);
static const unsigned ENTRIES_PER_BUCKET = CACHE_LINE_SIZE/sizeof(entry_t);
static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2;
//
// Record if the entry being returned is empty. Otherwise the caller will have to waste time
// re-comparing the keys to confirm that it did not lose a race to fill an empty entry.
-static volatile entry_t *hti_lookup (hti_t *hti, void *key, uint32_t key_hash, int *is_empty) {
+static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_hash, int *is_empty) {
TRACE("h2", "hti_lookup(key %p in hti %p)", key, hti);
*is_empty = 0;
assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale));
assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
- uint64_t ht1_ent_val = ht1_ent->val;
+ map_val_t ht1_ent_val = ht1_ent->val;
if (EXPECT_FALSE(ht1_ent_val == COPIED_VALUE)) {
TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
return FALSE; // already copied
// Kill empty entries.
if (EXPECT_FALSE(ht1_ent_val == DOES_NOT_EXIST)) {
- uint64_t ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE);
+ map_val_t ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE);
if (ht1_ent_val == DOES_NOT_EXIST) {
TRACE("h1", "hti_copy_entry: empty entry %p killed", ht1_ent, 0);
return TRUE;
// Install the key in the new table.
uint64_t ht1_ent_key = ht1_ent->key;
- void *key = (ht1->ht->key_type == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key);
+ map_key_t key = (ht1->ht->key_type == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key);
// The old table's dead entries don't need to be copied to the new table, but their keys need to be freed.
assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE, TAG1));
// Copy the value to the entry in the new table.
ht1_ent_val = STRIP_TAG(ht1_ent_val, TAG1);
- uint64_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val);
+ map_val_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val);
// If there is a nested copy in progress, we might have installed the key into a dead entry.
if (old_ht2_ent_val == COPIED_VALUE) {
// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
// <expected> is CAS_EXPECT_WHATEVER then skip the test entirely.
//
-static uint64_t hti_cas (hti_t *hti, void *key, uint32_t key_hash, uint64_t expected, uint64_t new) {
+static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_t expected, map_val_t new) {
TRACE("h1", "hti_cas: hti %p key %p", hti, key);
TRACE("h1", "hti_cas: value %p expect %p", new, expected);
assert(hti);
(hti->ht->key_type == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);
// If the entry is in the middle of a copy, the copy must be completed first.
- uint64_t ent_val = ent->val;
+ map_val_t ent_val = ent->val;
if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
if (ent_val != COPIED_VALUE) {
int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
}
// CAS the value into the entry. Retry if it fails.
- uint64_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new);
+ map_val_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new);
if (EXPECT_FALSE(v != ent_val)) {
TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", ent_val, v);
return hti_cas(hti, key, key_hash, expected, new); // recursive tail-call
}
//
-static uint64_t hti_get (hti_t *hti, void *key, uint32_t key_hash) {
+static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
int is_empty;
volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);
return DOES_NOT_EXIST;
// If the entry is being copied, finish the copy and retry on the next table.
- uint64_t ent_val = ent->val;
+ map_val_t ent_val = ent->val;
if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
if (EXPECT_FALSE(ent_val != COPIED_VALUE)) {
int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
}
//
-uint64_t ht_get (hashtable_t *ht, void *key) {
+map_val_t ht_get (hashtable_t *ht, map_key_t key) {
uint32_t hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key);
return hti_get(ht->hti, key, hash);
}
volatile entry_t *ent;
uint64_t limit;
uint64_t total_copied = hti->num_entries_copied;
- int num_copied = 0;
- int x = hti->copy_scan;
+ uint64_t num_copied = 0;
+ uint64_t x = hti->copy_scan;
TRACE("h1", "ht_cas: help copy. scan is %llu, size is %llu", x, 1<<hti->scale);
if (total_copied != (1 << hti->scale)) {
}
//
-uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t new_val) {
+map_val_t ht_cas (hashtable_t *ht, map_key_t key, map_val_t expected_val, map_val_t new_val) {
TRACE("h2", "ht_cas: key %p ht %p", key, ht);
TRACE("h2", "ht_cas: expected val %p new val %p", expected_val, new_val);
}
}
- uint64_t old_val;
+ map_val_t old_val;
uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key);
while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) {
assert(hti->next);
// Remove the value in <ht> associated with <key>. Returns the value removed, or DOES_NOT_EXIST if there was
// no value for that key.
-uint64_t ht_remove (hashtable_t *ht, void *key) {
+map_val_t ht_remove (hashtable_t *ht, map_key_t key) {
hti_t *hti = ht->hti;
- uint64_t val;
+ map_val_t val;
uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key);
do {
val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST);
}
}
-ht_iter_t *ht_iter_begin (hashtable_t *ht, void *key) {
+ht_iter_t *ht_iter_begin (hashtable_t *ht, map_key_t key) {
assert(key == NULL);
hti_t *hti = ht->hti;
int rcount;
return iter;
}
-uint64_t ht_iter_next (ht_iter_t *iter, void **key_ptr) {
+map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) {
volatile entry_t *ent;
- uint64_t key;
- uint64_t val;
+ map_key_t key;
+ map_val_t val;
uint64_t table_size = (1 << iter->hti->scale);
do {
iter->idx++;
return DOES_NOT_EXIST;
}
ent = &iter->hti->table[iter->idx];
- key = ent->key;
+ key = (iter->hti->ht->key_type == NULL) ? (map_key_t)ent->key : GET_PTR(ent->key);
val = ent->val;
} while (key == DOES_NOT_EXIST || val == DOES_NOT_EXIST || val == TOMBSTONE);
if (key_ptr) {
- *key_ptr = (void *)key;
+ *key_ptr = key;
}
if (val == COPIED_VALUE) {
uint32_t hash = (iter->hti->ht->key_type == NULL)
- ? murmur32_8b(key)
- : iter->hti->ht->key_type->hash((void *)key);
- val = hti_get(iter->hti->next, (void *)ent->key, hash);
+ ? murmur32_8b((uint64_t)key)
+ : iter->hti->ht->key_type->hash(key);
+ val = hti_get(iter->hti->next, (map_key_t)ent->key, hash);
}
return val;
typedef struct ll_iter node_t;
struct ll_iter {
- void *key;
- uint64_t val;
+ map_key_t key;
+ map_val_t val;
node_t *next;
};
const datatype_t *key_type;
};
-static node_t *node_alloc (void *key, uint64_t val) {
+static node_t *node_alloc (map_key_t key, map_val_t val) {
node_t *item = (node_t *)nbd_malloc(sizeof(node_t));
item->key = key;
item->val = val;
return count;
}
-static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, void *key, int help_remove) {
+static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, map_key_t key, int help_remove) {
node_t *pred = ll->head;
node_t *item = pred->next;
TRACE("l2", "find_pred: searching for key %p in list (head is %p)", key, pred);
}
// Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor.
-uint64_t ll_lookup (list_t *ll, void *key) {
+map_val_t ll_lookup (list_t *ll, map_key_t key) {
TRACE("l1", "ll_lookup: searching for key %p in list %p", key, ll);
node_t *item;
int found = find_pred(NULL, &item, ll, key, FALSE);
// If we found an <item> matching the key return its value.
if (found) {
- uint64_t val = item->val;
+ map_val_t val = item->val;
if (val != DOES_NOT_EXIST) {
TRACE("l1", "ll_lookup: found item %p. val %p. returning item", item, item->val);
return val;
return DOES_NOT_EXIST;
}
-uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) {
+map_val_t ll_cas (list_t *ll, map_key_t key, map_val_t expectation, map_val_t new_val) {
TRACE("l1", "ll_cas: key %p list %p", key, ll);
TRACE("l1", "ll_cas: expectation %p new value %p", expectation, new_val);
ASSERT((int64_t)new_val > 0);
// Create a new item and insert it into the list.
TRACE("l2", "ll_cas: attempting to insert item between %p and %p", pred, pred->next);
- void *new_key = (ll->key_type == NULL) ? key : ll->key_type->clone(key);
+ map_key_t new_key = (ll->key_type == NULL) ? key : ll->key_type->clone(key);
node_t *new_item = node_alloc(new_key, new_val);
node_t *next = new_item->next = old_item;
node_t *other = SYNC_CAS(&pred->next, next, new_item);
}
// Found an item in the list that matches the key.
- uint64_t old_item_val = old_item->val;
+ map_val_t old_item_val = old_item->val;
do {
// If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us.
if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) {
// replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it
// succeeded and return our value even though we indicated that the node has been removed. If the CAS
// fails it means another thread either removed the node or updated its value.
- uint64_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val);
+ map_val_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val);
if (ret_val == old_item_val) {
TRACE("l1", "ll_cas: the CAS succeeded. updated the value of the item", 0, 0);
return ret_val; // success
} while (1);
}
-uint64_t ll_remove (list_t *ll, void *key) {
+map_val_t ll_remove (list_t *ll, map_key_t key) {
TRACE("l1", "ll_remove: removing item with key %p from list %p", key, ll);
node_t *pred;
node_t *item;
// Atomically swap out the item's value in case another thread is updating the item while we are
// removing it. This establishes which operation occurs first logically, the update or the remove.
- uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
+ map_val_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
TRACE("l2", "ll_remove: replaced item's val %p with DOES_NOT_EXIT", val, 0);
// Unlink <item> from <ll>. If we lose a race to another thread just back off. It is safe to leave the
printf("\n");
}
-ll_iter_t *ll_iter_begin (list_t *ll, void *key) {
+ll_iter_t *ll_iter_begin (list_t *ll, map_key_t key) {
node_t *iter = node_alloc(0,0);
find_pred(NULL, &iter->next, ll, key, FALSE);
return iter;
}
-uint64_t ll_iter_next (ll_iter_t *iter, void **key_ptr) {
+map_val_t ll_iter_next (ll_iter_t *iter, map_key_t *key_ptr) {
assert(iter);
node_t *item = iter->next;
while (item != NULL && IS_TAGGED(item->next, TAG1)) {
map->impl->print(map->data);
}
-uint64_t map_count (map_t *map) {
+map_val_t map_count (map_t *map) {
return map->impl->count(map->data);
}
-uint64_t map_get (map_t *map, void *key) {
+map_val_t map_get (map_t *map, map_key_t key) {
return map->impl->get(map->data, key);
}
-uint64_t map_set (map_t *map, void *key, uint64_t new_val) {
+map_val_t map_set (map_t *map, map_key_t key, map_val_t new_val) {
return map->impl->cas(map->data, key, CAS_EXPECT_WHATEVER, new_val);
}
-uint64_t map_add (map_t *map, void *key, uint64_t new_val) {
+map_val_t map_add (map_t *map, map_key_t key, map_val_t new_val) {
return map->impl->cas(map->data, key, CAS_EXPECT_DOES_NOT_EXIST, new_val);
}
-uint64_t map_cas (map_t *map, void *key, uint64_t expected_val, uint64_t new_val) {
+map_val_t map_cas (map_t *map, map_key_t key, map_val_t expected_val, map_val_t new_val) {
return map->impl->cas(map->data, key, expected_val, new_val);
}
-uint64_t map_replace(map_t *map, void *key, uint64_t new_val) {
+map_val_t map_replace(map_t *map, map_key_t key, map_val_t new_val) {
return map->impl->cas(map->data, key, CAS_EXPECT_EXISTS, new_val);
}
-uint64_t map_remove (map_t *map, void *key) {
+map_val_t map_remove (map_t *map, map_key_t key) {
return map->impl->remove(map->data, key);
}
-map_iter_t * map_iter_begin (map_t *map, void *key) {
+map_iter_t * map_iter_begin (map_t *map, map_key_t key) {
map_iter_t *iter = nbd_malloc(sizeof(map_iter_t));
iter->impl = map->impl;
iter->state = map->impl->iter_begin(map->data, key);
return iter;
}
-uint64_t map_iter_next (map_iter_t *iter, void **key_ptr) {
+map_val_t map_iter_next (map_iter_t *iter, map_key_t *key_ptr) {
return iter->impl->iter_next(iter->state, key_ptr);
}
typedef struct sl_iter node_t;
struct sl_iter {
- void *key;
- uint64_t val;
+ map_key_t key;
+ map_val_t val;
int top_level;
node_t *next[];
};
return n;
}
-static node_t *node_alloc (int level, void *key, uint64_t val) {
+static node_t *node_alloc (int level, map_key_t key, map_val_t val) {
assert(level >= 0 && level <= MAX_LEVEL);
size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
node_t *item = (node_t *)nbd_malloc(sz);
return count;
}
-static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, void *key, int help_remove) {
+static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, map_key_t key, int help_remove) {
node_t *pred = sl->head;
node_t *item = NULL;
TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
}
// Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
-uint64_t sl_lookup (skiplist_t *sl, void *key) {
+map_val_t sl_lookup (skiplist_t *sl, map_key_t key) {
TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl);
node_t *item = find_preds(NULL, NULL, 0, sl, key, FALSE);
// If we found an <item> matching the <key> return its value.
if (item != NULL) {
- uint64_t val = item->val;
+ map_val_t val = item->val;
if (val != DOES_NOT_EXIST) {
TRACE("s1", "sl_lookup: found item %p. val %p. returning item", item, item->val);
return val;
return DOES_NOT_EXIST;
}
-void *sl_min_key (skiplist_t *sl) {
+map_key_t sl_min_key (skiplist_t *sl) {
node_t *item = sl->head->next[0];
while (item != NULL) {
node_t *next = item->next[0];
return DOES_NOT_EXIST;
}
-uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_val) {
+map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expectation, map_val_t new_val) {
TRACE("s1", "sl_cas: key %p skiplist %p", key, sl);
TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val);
ASSERT((int64_t)new_val > 0);
// First insert <new_item> into the bottom level.
TRACE("s3", "sl_cas: attempting to insert item between %p and %p", preds[0], nexts[0]);
- void *new_key = (sl->key_type == NULL) ? key : sl->key_type->clone(key);
+ map_key_t new_key = (sl->key_type == NULL) ? key : sl->key_type->clone(key);
new_item = node_alloc(n, new_key, new_val);
node_t *pred = preds[0];
node_t *next = new_item->next[0] = nexts[0];
}
// Found an item in the skiplist that matches the key.
- uint64_t old_item_val = old_item->val;
+ map_val_t old_item_val = old_item->val;
do {
// If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us.
if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) {
// replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it
// succeeded and return our value even though we indicated that the node has been removed. If the CAS
// fails it means another thread either removed the node or updated its value.
- uint64_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val);
+ map_val_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val);
if (ret_val == old_item_val) {
TRACE("s1", "sl_cas: the CAS succeeded. updated the value of the item", 0, 0);
return ret_val; // success
return DOES_NOT_EXIST; // success
}
-uint64_t sl_remove (skiplist_t *sl, void *key) {
+map_val_t sl_remove (skiplist_t *sl, map_key_t key) {
TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key, sl);
node_t *preds[MAX_LEVEL+1];
node_t *item = find_preds(preds, NULL, -1, sl, key, TRUE);
// Atomically swap out the item's value in case another thread is updating the item while we are
// removing it. This establishes which operation occurs first logically, the update or the remove.
- uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
+ map_val_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
TRACE("s2", "sl_remove: replaced item %p's value with DOES_NOT_EXIT", item, 0);
node_t *pred = preds[0];
}
}
-sl_iter_t *sl_iter_begin (skiplist_t *sl, void *key) {
+sl_iter_t *sl_iter_begin (skiplist_t *sl, map_key_t key) {
node_t *iter = node_alloc(0, 0, 0);
find_preds(NULL, &iter->next[0], 0, sl, key, FALSE);
return iter;
}
-uint64_t sl_iter_next (sl_iter_t *iter, void **key_ptr) {
+map_val_t sl_iter_next (sl_iter_t *iter, map_key_t *key_ptr) {
assert(iter);
node_t *item = iter->next[0];
while (item != NULL && IS_TAGGED(item->next[0], TAG1)) {
/*
* Written by Josh Dybnis and released to the public domain, as explained at
* http://creativecommons.org/licenses/publicdomain
+ *
+ * tests ported from high-scale-lib
+ * http://sourceforge.net/projects/high-scale-lib
*/
#include <stdio.h>
#include <pthread.h>
+ optimize integer keys
+ ht_print()
+ iterators
+
+memory management
+------------------
- make rcu yield when its buffer gets full instead of throwing an assert
- alternate memory reclamation schemes, hazard pointers and/or reference count
-- investigate 16 byte CAS; ht can store GUIDs inline instead of pointers to actual keys
-- document usage
-- document algorithms
-- port tests from lib-high-scale
-- 32 bit version of hashtable
-- verify list and skiplist work on 32 bit platforms
+- verify key management in list, skiplist, and hashtable
+
+quality
+--------
- transaction tests
-- validate the arguments to interface functions
+- port perf tests from lib-high-scale
+- characterize the performance of hashtable, list and skiplist
+- validate arguments in interface functions
+- document usage of the library
+- document algorithms
+
+optimization
+------------
+- investigate 16 byte CAS; ht can store GUIDs inline instead of pointers to actual keys
- shortcut from write-set to entries/nodes
- use a shared scan for write-set validation, similar to ht copy logic
-- characterize the performance of hashtable, list and skiplist
- experiment with the performance impact of not passing the hash between functions in ht
- experiment with embedding the keys in the list/skiplist nodes
+
+features
+--------
+- 32 bit version of hashtable
+- verify list and skiplist work on 32 bit platforms
- allow values of 0 to be inserted into maps (change DOES_NOT_EXIST to something else)
-- see if it's possible to rename nbd_malloc to malloc
+- separate nbd_malloc/nbd_free into general purpose malloc/free replacement