1 #include "internal.h"
\r
7 /****************************************************************************/
\r
/* TRD : entry point for the abstraction-layer tests; prints a banner,
         then runs the atomic-increment and DCAS tests in turn
*/

void test_lfds601_abstraction( void )
{
  printf( "\n"
          "Abstraction Tests\n"
          "=================\n" );

  abstraction_test_increment();
  abstraction_test_dcas();

  return;
}
\r
24 /****************************************************************************/
\r
25 void abstraction_test_increment( void )
\r
36 atomic_shared_counter = 0;
\r
38 /* TRD : here we test lfds601_abstraction_increment
\r
40 first, we run one thread per CPU where each thread increments
\r
41 a shared counter 10,000,000 times - however, this first test
\r
42 does NOT use atomic increment; it uses "++"
\r
44 second, we repeat the exercise, but this time using
\r
45 lfds601_abstraction_increment()
\r
47 if the final value in the first test is less than (10,000,000*cpu_count)
\r
48 then the system is sensitive to non-atomic increments; this means if
\r
49 our atomic version of the test passes, we can have some degree of confidence
\r
52 if the final value in the first test is in fact correct, then we can't know
\r
53 that our atomic version has changed anything
\r
55 and of course if the final value in the atomic test is wrong, we know things
\r
59 internal_display_test_name( "Atomic increment" );
\r
61 cpu_count = abstraction_cpu_count();
\r
63 thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
\r
66 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
67 abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_increment, &shared_counter );
\r
69 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
70 abstraction_thread_wait( thread_handles[loop] );
\r
73 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
74 abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_atomic_increment, &atomic_shared_counter );
\r
76 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
77 abstraction_thread_wait( thread_handles[loop] );
\r
79 free( thread_handles );
\r
82 if( shared_counter < (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
\r
85 if( shared_counter == (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
\r
86 puts( "indeterminate" );
\r
88 if( atomic_shared_counter < (10000000 * cpu_count) )
\r
98 /****************************************************************************/
\r
99 thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter )
\r
101 volatile lfds601_atom_t
\r
104 /* TRD : lfds601_atom_t must be volatile or the compiler
\r
105 optimizes it away into a single store
\r
108 assert( shared_counter != NULL );
\r
110 while( count++ < 10000000 )
\r
111 (*(lfds601_atom_t *) shared_counter)++;
\r
113 return( (thread_return_t) EXIT_SUCCESS );
\r
120 /****************************************************************************/
\r
121 thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter )
\r
126 assert( shared_counter != NULL );
\r
128 while( count++ < 10000000 )
\r
129 lfds601_abstraction_increment( shared_counter );
\r
131 return( (thread_return_t) EXIT_SUCCESS );
\r
138 /****************************************************************************/
\r
139 void abstraction_test_dcas( void )
\r
148 struct abstraction_test_dcas_state
\r
151 LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) volatile lfds601_atom_t
\r
152 shared_counter[2] = { 0, 0 };
\r
157 /* TRD : here we test lfds601_abstraction_dcas
\r
159 we run one thread per CPU
\r
160 we use lfds601_abstraction_dcas() to increment a shared counter
\r
161 every time a thread successfully increments the counter,
\r
162 it increments a thread local counter
\r
163 the threads run for ten seconds
\r
164 after the threads finish, we total the local counters
\r
165 they should equal the shared counter
\r
168 internal_display_test_name( "Atomic DCAS" );
\r
170 cpu_count = abstraction_cpu_count();
\r
172 atds = malloc( sizeof(struct abstraction_test_dcas_state) * cpu_count );
\r
174 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
176 (atds+loop)->shared_counter = shared_counter;
\r
177 (atds+loop)->local_counter = 0;
\r
180 thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
\r
182 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
183 abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_dcas, atds+loop );
\r
185 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
186 abstraction_thread_wait( thread_handles[loop] );
\r
188 free( thread_handles );
\r
191 for( loop = 0 ; loop < cpu_count ; loop++ )
\r
192 local_total += (atds+loop)->local_counter;
\r
194 if( local_total == shared_counter[0] )
\r
197 if( local_total != shared_counter[0] )
\r
210 /****************************************************************************/
\r
211 thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state )
\r
213 struct abstraction_test_dcas_state
\r
219 LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) lfds601_atom_t
\r
223 assert( abstraction_test_dcas_state != NULL );
\r
225 atds = (struct abstraction_test_dcas_state *) abstraction_test_dcas_state;
\r
227 time( &start_time );
\r
229 while( time(NULL) < start_time + 10 )
\r
231 compare[0] = *atds->shared_counter;
\r
232 compare[1] = *(atds->shared_counter+1);
\r
236 exchange[0] = compare[0] + 1;
\r
237 exchange[1] = compare[1];
\r
239 while( 0 == lfds601_abstraction_dcas(atds->shared_counter, exchange, compare) );
\r
241 atds->local_counter++;
\r
244 return( (thread_return_t) EXIT_SUCCESS );
\r