/***** includes *****/
#include "libtest_tests_internal.h"

/***** private prototypes *****/
static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_add( void *libtest_threadset_per_thread_state );
static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_atomic_add( void *libtest_threadset_per_thread_state );





/****************************************************************************/
void libtest_tests_pal_atomic_add( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
{
  lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
    number_logical_processors,
    atomic_shared_counter,
    shared_counter;

  lfds710_pal_uint_t
    loop = 0;

  struct lfds710_list_asu_element
    *lasue = NULL;

  struct libtest_logical_processor
    *lp;

  struct libtest_threadset_per_thread_state
    *pts;

  struct libtest_threadset_state
    ts;

  LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
  LFDS710_PAL_ASSERT( ms != NULL );
  LFDS710_PAL_ASSERT( dvs != NULL );

  /* TRD : here we test LFDS710_PAL_ATOMIC_ADD

           first, we run one thread per logical processor, where each thread
           increments a shared counter 10,000,000 times - however, this first
           test does NOT use atomic add; it uses "++"

           second, we repeat the exercise, but this time using
           LFDS710_PAL_ATOMIC_ADD

           if the final value in the first test is less than (10,000,000 * number_logical_processors),
           then the system is sensitive to non-atomic adds; this means that if
           our atomic version of the test passes, we can have some degree of confidence
           that it works

           if the final value in the first test is in fact correct, then we cannot know
           whether our atomic version has changed anything

           and of course if the final value in the atomic test is wrong, we know things
           are broken
  */
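  /* a plain "++" on a shared counter is a separate load, add and store; if two
     cores each load the same value, add one and store, one of the two increments
     is lost - this is why, on a multi-core machine, the non-atomic pass is
     expected to fall short of 10,000,000 * number_logical_processors
  */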

  // TRD : allocate
  lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
  pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );

  shared_counter = 0;
  atomic_shared_counter = 0;

  LFDS710_MISC_BARRIER_STORE;

  // TRD : non-atomic

  libtest_threadset_init( &ts, NULL );

  while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_add, (void *) &shared_counter );
    loop++;
  }

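  /* the store barrier and forced store flush below appear to be here so that the
     zeroed counters and the thread set state are visible to the worker threads
     (which issue a matching load barrier on startup) before they are released
  */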
  LFDS710_MISC_BARRIER_STORE;
  lfds710_misc_force_store();
  libtest_threadset_run( &ts );
  libtest_threadset_cleanup( &ts );

  // TRD : atomic

  libtest_threadset_init( &ts, NULL );

  loop = 0;
  lasue = NULL;

  while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_atomic_add, (void *) &atomic_shared_counter );
    loop++;
  }

  LFDS710_MISC_BARRIER_STORE;
  lfds710_misc_force_store();
  libtest_threadset_run( &ts );
  libtest_threadset_cleanup( &ts );
  LFDS710_MISC_BARRIER_LOAD;

  /* TRD : results

           on a single core, both "++" and atomic add should reach the correct total

           if the non-atomic test passes on multiple cores, then we cannot really say
           anything about whether or not the atomic test is working
  */
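  /* with number_logical_processors threads each performing 10,000,000 increments,
     the expected final value of each counter is 10,000,000 * number_logical_processors,
     which is what the checks below compare against
  */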

  if( number_logical_processors == 1 )
  {
    if( shared_counter == (10000000 * number_logical_processors) and atomic_shared_counter == (10000000 * number_logical_processors) )
      *dvs = LFDS710_MISC_VALIDITY_VALID;

    if( shared_counter != (10000000 * number_logical_processors) or atomic_shared_counter != (10000000 * number_logical_processors) )
      *dvs = LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED;
  }

  if( number_logical_processors >= 2 )
  {
    if( shared_counter < (10000000 * number_logical_processors) and atomic_shared_counter == (10000000 * number_logical_processors) )
      *dvs = LFDS710_MISC_VALIDITY_VALID;

    if( shared_counter == (10000000 * number_logical_processors) and atomic_shared_counter == (10000000 * number_logical_processors) )
      *dvs = LFDS710_MISC_VALIDITY_INDETERMINATE_NONATOMIC_PASSED;

    if( atomic_shared_counter < (10000000 * number_logical_processors) )
      *dvs = LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED;
  }

  return;
}





/****************************************************************************/
static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_add( void *libtest_threadset_per_thread_state )
{
  struct libtest_threadset_per_thread_state
    *pts;

  lfds710_pal_uint_t volatile
    *shared_counter;

  lfds710_pal_uint_t volatile
    count = 0;

  LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );

  LFDS710_MISC_BARRIER_LOAD;

  pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
  shared_counter = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );

  libtest_threadset_thread_ready_and_wait( pts );

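  /* the non-atomic increment below is a separate load, add and store on each
     pass, so concurrent threads can overwrite one another's updates - this is
     the behaviour the non-atomic half of the test is looking for
  */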
  while( count++ < 10000000 )
    (*shared_counter)++;

  return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
}





/****************************************************************************/
static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_atomic_add( void *libtest_threadset_per_thread_state )
{
  struct libtest_threadset_per_thread_state
    *pts;

  lfds710_pal_uint_t volatile
    result,
    *shared_counter;

  lfds710_pal_uint_t volatile
    count = 0;

  LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );

  LFDS710_MISC_BARRIER_LOAD;

  pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
  shared_counter = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );

  libtest_threadset_thread_ready_and_wait( pts );

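  /* each thread performs 10,000,000 atomic increments; as used here,
     LFDS710_PAL_ATOMIC_ADD takes a pointer to the target, the value to add, a
     variable which receives the result of the add, and the type operated on -
     the result is not needed by this test, so it is cast to void to avoid
     unused-value warnings
  */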
  while( count++ < 10000000 )
  {
    LFDS710_PAL_ATOMIC_ADD( shared_counter, 1, result, lfds710_pal_uint_t );
    (void) result;
  }

  return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
}
