/* TRD : accessor/mutator macros for the benchmark queue element and state

         GET macros read the .key / .value / .user_state fields directly;
         SET macros store the caller's value into the void * field, casting
         through lfds710_pal_uint_t first
         (presumably lfds710_pal_uint_t is pointer-width on all supported
         platforms, so the double cast silences conversion warnings when an
         integer is stored — confirm against the lfds710 porting layer)

         NOTE(review): the SET macros evaluate new_key / new_value once only,
         so side-effecting arguments are safe
*/
2 #define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
3 #define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
4 #define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
5 #define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
6 #define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
8 /***** structures *****/
9 struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
11 struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
19 struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state
21 /* TRD : the pointers are on separate cache lines so threads enqueuing do not
22 physically collide with threads dequeuing; this is done to be fair in
23 the benchmark to the lock-free code, which does the same
25 since we're not atomic, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
26 bytes apart (e.g. the ERG on ARM), only cache line length in bytes
29 struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
33 pal_lock_pthread_spinlock_process_private_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
41 /***** public prototypes *****/
/* TRD : initializes the queue state qs; qe is caller-allocated storage and
         user_state is an opaque pointer retrievable via the GET_USER_STATE
         macro (presumably qe is the initial dummy element, as in the
         lock-free lfds710 queue — confirm in the .c file)
*/
42 void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe, void *user_state );
/* TRD : tears down qs; the callback, if non-NULL, presumably receives each
         element still in the queue so the caller can free it — confirm
         against the implementation (dummy_flag's meaning is not visible here)
*/
43 void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe, enum flag dummy_flag) );
/* TRD : enqueues caller-allocated element qe onto qs */
45 void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe );
/* TRD : dequeues into *qe; returns int — NOTE(review): presumably non-zero
         on success / zero when the queue is empty, following the lfds710
         dequeue convention; verify in the .c file before relying on it
*/
46 int libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element **qe );